diff --git a/accountstats/views.py b/accountstats/views.py index 3997caa..78ace39 100644 --- a/accountstats/views.py +++ b/accountstats/views.py @@ -1,7 +1,7 @@ from django.shortcuts import render from django.http import JsonResponse from django.conf import settings -from datetime import datetime, timedelta +from datetime import timedelta from django.contrib.auth.decorators import login_required from django.utils.translation import gettext as _ from userportal.common import account_or_staff, Prometheus, parse_start_end @@ -46,7 +46,7 @@ def account(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_application(request, account): data = [] query_alloc = 'slurm_job:process_usage:sum_account{{account="{}", {}}}'.format(account, prom.get_filter()) @@ -98,7 +98,7 @@ def graph(request, query, stacked=True, unit=None): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_cpu_allocated(request, account): query_alloc = 'sum(slurm_job:allocated_core:count_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter()) return graph(request, query_alloc, unit=_('cores')) @@ -106,7 +106,7 @@ def graph_cpu_allocated(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_cpu_used(request, account): query_used = 'sum(slurm_job:used_core:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter()) return graph(request, 
query_used, unit=_('cores')) @@ -114,7 +114,7 @@ def graph_cpu_used(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_cpu_wasted(request, account): query_alloc = 'clamp_min(sum(slurm_job:allocated_core:count_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:used_core:sum_user_account{{account="{}", {}}}) by (user), 0)'.format(account, prom.get_filter(), account, prom.get_filter()) return graph(request, query_alloc, stacked=False, unit=_('cores')) @@ -122,7 +122,7 @@ def graph_cpu_wasted(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_mem_allocated(request, account): query_alloc = 'sum(slurm_job:allocated_memory:sum_user_account{{account="{}", {}}}) by (user) /(1024*1024*1024)'.format(account, prom.get_filter()) return graph(request, query_alloc, unit=_('GiB')) @@ -130,7 +130,7 @@ def graph_mem_allocated(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_mem_used(request, account): query_used = 'sum(slurm_job:rss_memory:sum_user_account{{account="{}", {}}}) by (user) /(1024*1024*1024)'.format(account, prom.get_filter()) return graph(request, query_used, unit=_('GiB')) @@ -138,7 +138,7 @@ def graph_mem_used(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) 
+@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_mem_wasted(request, account): query_alloc = 'clamp_min(sum(slurm_job:allocated_memory:sum_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:rss_memory:sum_user_account{{account="{}", {}}}) by (user), 0) /(1024*1024*1024)'.format(account, prom.get_filter(), account, prom.get_filter()) return graph(request, query_alloc, stacked=False, unit=_('GiB')) @@ -146,7 +146,7 @@ def graph_mem_wasted(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(hours=6), minimum=prom.rate('lustre_exporter')) +@parse_start_end(timedelta_start=timedelta(hours=6), minimum=prom.rate('lustre_exporter')) def graph_lustre_mdt(request, account): query = 'sum(rate(lustre_job_stats_total{{component=~"mdt",account=~"{}", {}}}[5m])) by (user, fs) !=0'.format(account, prom.get_filter()) return graph(request, query, stacked=False, unit=_('IOPS')) @@ -154,7 +154,7 @@ def graph_lustre_mdt(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(hours=6), minimum=prom.rate('lustre_exporter')) +@parse_start_end(timedelta_start=timedelta(hours=6), minimum=prom.rate('lustre_exporter')) def graph_lustre_ost(request, account): data = [] for i in ['read', 'write']: @@ -192,7 +192,7 @@ def graph_lustre_ost(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_gpu_allocated(request, account): query = 'sum(slurm_job:allocated_gpu:count_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter()) return graph(request, query, unit=_('GPUs')) @@ -200,7 +200,7 @@ def graph_gpu_allocated(request, account): @login_required @account_or_staff 
-@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_gpu_used(request, account): query = 'sum(slurm_job:used_gpu:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter()) return graph(request, query, unit=_('GPUs')) @@ -208,7 +208,7 @@ def graph_gpu_used(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) def graph_gpu_wasted(request, account): query = 'sum(slurm_job:allocated_gpu:count_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:used_gpu:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter(), account, prom.get_filter()) return graph(request, query, stacked=False, unit=_('GPUs')) @@ -216,7 +216,7 @@ def graph_gpu_wasted(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) # kinda broken when using multiple GPUs def graph_gpu_power_allocated(request, account): query = 'count(slurm_job_power_gpu{{account="{}", {}}}) by (user) * 300'.format(account, prom.get_filter()) @@ -225,7 +225,7 @@ def graph_gpu_power_allocated(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) # kinda broken when using multiple GPUs def graph_gpu_power_used(request, account): query = 'sum(slurm_job_power_gpu{{account="{}", {}}}) by (user) / 
1000'.format(account, prom.get_filter()) @@ -234,7 +234,7 @@ def graph_gpu_power_used(request, account): @login_required @account_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter')) # kinda broken when using multiple GPUs def graph_gpu_power_wasted(request, account): query = '(count(slurm_job_power_gpu{{account="{}", {}}}) by (user) * 300) - (sum(slurm_job_power_gpu{{account="{}", {}}}) by (user) / 1000)'.format(account, prom.get_filter(), account, prom.get_filter()) @@ -254,7 +254,7 @@ def graph_gpu_priority(request, account): # auth done in functions above -@parse_start_end(default_start=datetime.now() - timedelta(days=90), minimum=prom.rate('slurm-job-exporter')) +@parse_start_end(timedelta_start=timedelta(days=90), minimum=prom.rate('slurm-job-exporter')) def graph_cpu_or_gpu_priority(request, account, gpu_or_cpu): data = [] if gpu_or_cpu == 'gpu': diff --git a/jobstats/views.py b/jobstats/views.py index 1c57ae6..31b2c5e 100644 --- a/jobstats/views.py +++ b/jobstats/views.py @@ -603,7 +603,7 @@ def graph_cpu(request, username, job_id): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_cpu_user(request, username): data = [] try: @@ -640,7 +640,7 @@ def graph_cpu_user(request, username): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_mem_user(request, username): data = [] try: @@ -903,7 +903,7 @@ def graph_lustre_mdt(request, username, job_id): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(hours=6)) +@parse_start_end(timedelta_start=timedelta(hours=6)) def graph_lustre_mdt_user(request, username): query = 
'sum(rate(lustre_job_stats_total{{component=~"mdt",user=~"{}", {}}}[{}s])) by (operation, fs) !=0'.format(username, prom.get_filter(), prom.rate('lustre_exporter')) stats = prom.query_prometheus_multiple(query, request.start, request.end, step=request.step) @@ -979,7 +979,7 @@ def graph_lustre_ost(request, username, job_id): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(hours=6)) +@parse_start_end(timedelta_start=timedelta(hours=6)) def graph_lustre_ost_user(request, username): data = [] for i in ['read', 'write']: @@ -1067,7 +1067,7 @@ def graph_gpu_utilization(request, username, job_id): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_gpu_utilization_user(request, username): data = [] @@ -1231,7 +1231,7 @@ def graph_gpu_power(request, username, job_id): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=2)) +@parse_start_end(timedelta_start=timedelta(days=2)) def graph_gpu_power_user(request, username): data = [] diff --git a/nodes/views.py b/nodes/views.py index 9a2f4f7..a058298 100644 --- a/nodes/views.py +++ b/nodes/views.py @@ -302,7 +302,7 @@ def node_state(node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_disk_used(request, node): query_disk = '(node_filesystem_size_bytes{{{hostname_label}=~"{node}(:.*)", {filter}}} - node_filesystem_avail_bytes{{{hostname_label}=~"{node}(:.*)", {filter}}})/(1000*1000*1000)'.format( hostname_label=settings.PROM_NODE_HOSTNAME_LABEL, @@ -332,7 +332,7 @@ def graph_disk_used(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_cpu_jobstats(request, node): query = 
'sum(rate(slurm_job_core_usage_total{{{hostname_label}=~"{node}(:.*)", {filter}}}[{step}s]) / 1000000000) by (user, slurmjobid)'.format( hostname_label=settings.PROM_NODE_HOSTNAME_LABEL, @@ -363,7 +363,7 @@ def graph_cpu_jobstats(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_cpu_node(request, node): query = 'sum by (mode)(irate(node_cpu_seconds_total{{mode!="idle",{hostname_label}=~"{node}(:.*)",{filter}}}[{step}s]))'.format( hostname_label=settings.PROM_NODE_HOSTNAME_LABEL, @@ -395,7 +395,7 @@ def graph_cpu_node(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_memory_jobstats(request, node): query = '(sum(slurm_job_memory_usage{{{hostname_label}=~"{node}(:.*)", {filter}}}) by (user, slurmjobid))/(1024*1024*1024)'.format( hostname_label=settings.PROM_NODE_HOSTNAME_LABEL, @@ -426,7 +426,7 @@ def graph_memory_jobstats(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_memory_node(request, node): data = [] query_apps = '(node_memory_MemTotal_bytes{{{hostname_label}=~"{node}(:.*)",{filter}}} - \ @@ -480,7 +480,7 @@ def graph_memory_node(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_ethernet_bdw(request, node): data = [] @@ -517,7 +517,7 @@ def graph_ethernet_bdw(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_infiniband_bdw(request, node): data = [] for direction in ['received', 'transmitted']: @@ -553,7 +553,7 @@ def graph_infiniband_bdw(request, node): @login_required 
@staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_disk_iops(request, node): data = [] for direction in ['reads', 'writes']: @@ -589,7 +589,7 @@ def graph_disk_iops(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_disk_bdw(request, node): data = [] for direction in ['read', 'written']: @@ -625,7 +625,7 @@ def graph_disk_bdw(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_gpu_utilization(request, node): data = [] queries = [ @@ -670,7 +670,7 @@ def graph_gpu_utilization(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_gpu_memory(request, node): query = 'slurm_job_memory_usage_gpu{{{hostname_label}=~"{node}(:.*)", {filter}}} /(1024*1024*1024)'.format( hostname_label=settings.PROM_NODE_HOSTNAME_LABEL, @@ -703,7 +703,7 @@ def graph_gpu_memory(request, node): @login_required @staff -@parse_start_end(default_start=datetime.now() - timedelta(days=7)) +@parse_start_end(timedelta_start=timedelta(days=7)) def graph_gpu_power(request, node): query = 'slurm_job_power_gpu{{{hostname_label}=~"{node}(:.*)", {filter}}}/1000'.format( hostname_label=settings.PROM_NODE_HOSTNAME_LABEL, diff --git a/userportal/common.py b/userportal/common.py index 513ea7c..7a2a78c 100644 --- a/userportal/common.py +++ b/userportal/common.py @@ -170,9 +170,11 @@ def get_step(start, end, minimum=60): return span -def parse_start_end(default_start=datetime.now() - timedelta(days=1), default_end=datetime.now(), minimum=60): +def parse_start_end(timedelta_start=timedelta(days=1), minimum=60): """ From the GET parameters, add start and end to the request object if 
delta is set, it will be used to calculate the start time from now() instead of start and end + + The default parameters are evaluated when Python is loaded, this is why they are not datetime objects since they would not update when the decorator is called """ def decorator_wrapper(view_func): def func_wrapper(request, *args, **kwargs): @@ -185,17 +187,17 @@ def func_wrapper(request, *args, **kwargs): try: start = datetime.fromtimestamp(int(request.GET['start'])) except ValueError: - start = default_start + start = datetime.now() - timedelta_start else: - start = default_start + start = datetime.now() - timedelta_start if 'end' in request.GET: try: end = datetime.fromtimestamp(int(request.GET['end'])) except ValueError: - end = default_end + end = datetime.now() else: - end = default_end + end = datetime.now() # start and end can't be in the future if start > datetime.now(): diff --git a/usersummary/views.py b/usersummary/views.py index e4a2071..be56362 100644 --- a/usersummary/views.py +++ b/usersummary/views.py @@ -6,7 +6,7 @@ from slurm.models import JobTable from django.conf import settings from django.http import JsonResponse, HttpResponseForbidden -from datetime import datetime, timedelta +from datetime import timedelta prom = Prometheus(settings.PROMETHEUS) @@ -79,7 +79,7 @@ def user(request, username): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=90)) +@parse_start_end(timedelta_start=timedelta(days=90)) def graph_inodes(request, username, resource_type, resource_name): allocs = storage_allocations(username) for alloc in allocs: @@ -127,7 +127,7 @@ def graph_inodes(request, username, resource_type, resource_name): @login_required @user_or_staff -@parse_start_end(default_start=datetime.now() - timedelta(days=90)) +@parse_start_end(timedelta_start=timedelta(days=90)) def graph_bytes(request, username, resource_type, resource_name): allocs = storage_allocations(username) for alloc in allocs: