From fa7aa93172e3b71ea49f1432d04a8b16cb4cfe16 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 21 Nov 2023 12:22:20 +0100 Subject: [PATCH 01/72] Add base default fields --- blueprints/base.json | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/blueprints/base.json b/blueprints/base.json index 2c3e5dd7..a990eff2 100644 --- a/blueprints/base.json +++ b/blueprints/base.json @@ -1,13 +1,18 @@ { "clean_na": null, "documentation": null, - "post_info": null, "frequency": null, "id": null, "timezone": null, "resample": false, "max_date": null, + "metrics": null, "min_date": null, + "processed_data_file": null, + "raw_data_file": null, + "sensors": null, "source": null, - "version": null + "sources": null, + "version": null, + "forwarding": null } \ No newline at end of file From 63fb0cb0667e923b5123a6d9e4ef65ba85ca74fc Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Wed, 22 Nov 2023 15:25:48 +0100 Subject: [PATCH 02/72] Modify device handling methods --- scdata/_config/config.py | 16 - scdata/device/__init__.py | 741 +++++++++++++++----------------------- 2 files changed, 288 insertions(+), 469 deletions(-) diff --git a/scdata/_config/config.py b/scdata/_config/config.py index d425ff25..84de880e 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -105,22 +105,6 @@ class Config(object): # Plot out level (priority of the plot to show - 'DEBUG' or 'NORMAL') _plot_out_level = 'DEBUG' - # Alphasense sensor codes - _as_sensor_codes = { - '132': 'ASA4_CO', - '133': 'ASA4_H2S', - '130': 'ASA4_NO', - '212': 'ASA4_NO2', - '214': 'ASA4_OX', - '134': 'ASA4_SO2', - '162': 'ASB4_CO', - '133': 'ASB4_H2S',# - '130': 'ASB4_NO', # - '202': 'ASB4_NO2', - '204': 'ASB4_OX', - '164': 'ASB4_SO2' - } - ### --------------------------------------- ### ----------------ZENODO----------------- ### --------------------------------------- diff --git a/scdata/device/__init__.py b/scdata/device/__init__.py index ded83f07..b6dbd1ce 100644 --- a/scdata/device/__init__.py +++ b/scdata/device/__init__.py @@ -10,18 +10,21 @@ from os.path import join, basename from urllib.parse import urlparse from pandas import DataFrame, to_timedelta -from traceback import print_exc from numpy import nan from collections.abc import Iterable +from importlib import import_module + +from timezonefinder import TimezoneFinder +tf = TimezoneFinder() class Device(object): ''' Main implementation of the device class ''' - def __init__(self, blueprint = None, descriptor = {}): + def __init__(self, blueprint = None, source=dict(), params=dict()): ''' - Creates an instance of device. Devices are objects that contain sensors readings, metrics - (calculations based on sensors readings), and metadata such as units, dates, frequency and source + Creates an instance of device. Devices are objects that contain sensors data, metrics + (calculations based on sensors data), and metadata such as units, dates, frequency and source Parameters: ----------- @@ -30,20 +33,20 @@ def __init__(self, blueprint = None, descriptor = {}): Defines the type of device. For instance: sck_21, sck_20, csic_station, muv_station parrot_soil, sc_20_station, sc_21_station... A list of all the blueprints is found in config.blueprints_urls and accessible via the scdata.utils.load_blueprints(urls) function. - The blueprint can also be defined from the postprocessing info in SCAPI. The manual - parameter passed here is overriden by that of the API. + The blueprint can also be defined from the postprocessing info in SCAPI. 
The manual parameter passed here overrides that of the API.

        source: dict()
            Default: empty dict
            A dictionary containing a description of how to obtain the data from the device itself.

        descriptor: dict()
            Default: empty: std_out('Empty dataframe, ignoring', 'WARNING') dict
            A dictionary containing information about the device itself. Depending on the blueprint, this descriptor
            needs to have different data. If not all the data is present, the corresponding blueprint's default will
            be used

        params: dict()
            Default: empty dict
            A dictionary containing information about the device itself. Depending on the blueprint, this params
            dict needs to contain different data. If not all the data is present, the corresponding blueprint's
            default will be used

        Examples:
        ----------
        Device(blueprint = 'sck_21', source = {'type': 'api'}, params = {'id': '1919'})
            device with sck_21 blueprint, the API source and ID 1919

        Returns
        ----------
            Device object
        '''

        self.skip_blueprint = False
        # Set handler
        self.source = source
        self.__set_handler__()

        # Set custom params
        self.params = params
        self.__set_blueprint_attrs__(config.blueprints['base'])
        self.__set_params_attrs__(params)
        # Start out handler object now
        if self.hclass is not None:
            self.handler = self.hclass(self.id)

        # Set blueprint
        if blueprint is not None:
            self.blueprint = blueprint
            self.skip_blueprint = True
        else:
            self.blueprint = 'sck_21'
            if url_checker(self.handler.blueprint_url):
                std_out(f'Loading postprocessing blueprint from:\n{self.handler.blueprint_url}')
                self.blueprint = basename(urlparse(self.handler.blueprint_url).path).split('.')[0]

        # Set attributes
        if self.blueprint not in config.blueprints:
            raise ValueError(f'Specified blueprint {self.blueprint} is not in config')

        self.set_blueprint_attrs(config.blueprints[self.blueprint])
        self.blueprint_loaded_from_url = False
        self.hardware_loaded_from_url = False

        self.description = descriptor
        self.set_descriptor_attrs()

        if self.id is not None: self.id = str(self.id)

        # Postprocessing and forwarding
        self.hardware_url = None
        self.blueprint_url = None
        self.forwarding_params = None
        self.forwarding_request = None
        self.meta = None
        self.latest_postprocessing = None
        self.processed = False
        self.hardware_description = None

        # Add API handler if needed
        if self.source == 'api':
            raise ValueError(f'Specified blueprint {self.blueprint} is not in available blueprints')

            hmod = __import__('scdata.io.device_api', fromlist = ['io.device_api'])
            Hclass = getattr(hmod, self.sources[self.source]['handler'])

            # Create object
            self.api_device = Hclass(did = self.id)

            std_out(f'Checking postprocessing info from API device')

            if self.load_postprocessing() and (self.hardware_url is None):# or self.blueprint_url is None):
                if config._strict:
                    raise ValueError('Postprocessing could not be loaded as is incomplete and strict mode is enabled')
                std_out(f'Postprocessing loaded but with problems (hardware_url: {self.hardware_url} // blueprint_url: {self.blueprint_url}', 'WARNING')

            self.__set_blueprint_attrs__(config.blueprints[self.blueprint])

            if self.blueprint is None:
                raise ValueError(f'Device {self.id} cannot be init without blueprint. Need a blueprint to proceed')
            else:
                std_out(f'Device {self.id} is using {self.blueprint} blueprint')

        self.readings = DataFrame()
        # Init the rest of the stuff
        self.data = DataFrame()
        self.loaded = False
        self.options = dict()
        self.processed = False

        std_out(f'Device {self.id} initialised', 'SUCCESS')
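    # For reference, a minimal usage sketch of the reworked constructor above.
    # It mirrors the docstring example: the 'api' source type is resolved by
    # __set_handler__ below, and the id is merged in by __set_params_attrs__.
    # The device id used here is a hypothetical placeholder.
    #
    #   from scdata import Device
    #
    #   device = Device(blueprint = 'sck_21',
    #                   source = {'type': 'api'},
    #                   params = {'id': '16549'})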
    def __set_handler__(self):
        # Add handlers here
        if self.source['type'] == 'api':
            if 'module' in self.source:
                module = self.source['module']
            else:
                module = 'scdata.io.device_api'
            try:
                hmod = import_module(module)
            except ModuleNotFoundError:
                std_out(f'Module not found: {module}')
                raise ModuleNotFoundError('Specified module not found')
            else:
                self.hclass = getattr(hmod, self.source['handler'])
                # Create object
                std_out(f'Setting handler as {self.hclass}')
        elif self.source['type'] == 'csv':
            # TODO Add handler here
            std_out('No handler for CSV yet', 'ERROR')
            self.hclass = None
        elif self.source['type'] == 'kafka':
            # TODO Add handler here
            std_out('No handler for kafka yet', 'ERROR')
            raise NotImplementedError

    def set_blueprint_attrs(self, blueprintd):

        # Set attributes
        for bpitem in blueprintd:
            self.__setattr__(bpitem, blueprintd[bpitem])

    def set_descriptor_attrs(self):

        # Descriptor attributes
        for ditem in self.description.keys():
            if ditem not in vars(self): std_out(f'Ignoring {ditem} from input'); continue
            if type(self.__getattribute__(ditem)) == dict:
                self.__setattr__(ditem,
                                 dict_fmerge(self.__getattribute__(ditem),
                                             self.description[ditem]))
            else: self.__setattr__(ditem, self.description[ditem])

    def check_overrides(self, options = {}):

        if 'min_date' in options.keys():
            self.options['min_date'] = options['min_date']
        else:
            self.options['min_date'] = self.min_date

        if 'max_date' in options.keys():
            self.options['max_date'] = options['max_date']
        else:
            self.options['max_date'] = self.max_date

        if 'clean_na' in options.keys():
            self.options['clean_na'] = options['clean_na']
        else:
            self.options['clean_na'] = self.clean_na

        if 'frequency' in options.keys():
            self.options['frequency'] = options['frequency']
        elif self.frequency is not None:
            self.options['frequency'] = self.frequency
        else:
            self.options['frequency'] = '1Min'

        if 'resample' in options.keys():
            self.options['resample'] = options['resample']
        else:
            self.options['resample'] = False

        std_out(f'Set following options: {self.options}')
    def load_postprocessing(self):

        if self.source != 'api': return None

        if self.sources[self.source]['handler'] != 'ScApiDevice': return None

        # Request to get postprocessing information
        if self.api_device.get_device_postprocessing() is None: return None

        # Put it where it goes
        try:
            self.hardware_url = self.api_device.postprocessing['hardware_url']
            self.blueprint_url = self.api_device.postprocessing['blueprint_url']
            self.latest_postprocessing = self.api_device.postprocessing['latest_postprocessing']
            self.forwarding_params = self.api_device.postprocessing['forwarding_params']
            self.meta = self.api_device.postprocessing['meta']
            inc_postprocessing = False
        except KeyError:
            std_out('Ignoring postprocessing info as its incomplete', 'WARNING')
            inc_postprocessing = True
            pass

        if inc_postprocessing: return None

        # Load postprocessing info from url
        if url_checker(self.hardware_url) and self.hardware_loaded_from_url == False:

            std_out(f'Loading hardware information from:\n{self.hardware_url}')
            hardware_description = get_json_from_url(self.hardware_url)

            # TODO
            # Add additional checks to hardware_description

            if hardware_description is not None:
                self.hardware_description = hardware_description
                std_out('Hardware described in url is valid', "SUCCESS")
                self.hardware_loaded_from_url = True
            else:
                std_out("Hardware in url is not valid", 'ERROR')
                self.hardware_description = None

        # Find forwarding request
        if self.hardware_description is not None:
            if 'forwarding' in self.hardware_description:
                if self.hardware_description['forwarding'] in config.connectors:
                    self.forwarding_request = self.hardware_description['forwarding']
                    std_out(f"Requested a {self.hardware_description['forwarding']} connector for {self.id}")
                    if self.forwarding_params is None:
                        std_out('Assuming device has never been posted. Forwarding parameters are empty', 'WARNING')
                    else:
                        std_out(f'Connector parameters are not empty: {self.forwarding_params}')
                else:
                    std_out(f"Requested a {self.hardware_description['forwarding']} connector that is not available. Ignoring", 'WARNING')

        # Find postprocessing blueprint
        if self.skip_blueprint: std_out('Skipping blueprint as it was defined in device constructor', 'WARNING')
        if self.blueprint_loaded_from_url == False and not self.skip_blueprint:

            # Case when there is no info stored
            if url_checker(self.blueprint_url):
                std_out(f'blueprint_url in platform is not empty. Loading postprocessing blueprint from:\n{self.blueprint_url}')
                nblueprint = basename(urlparse(self.blueprint_url).path).split('.')[0]
            else:
                std_out(f'blueprint_url in platform is not valid', 'WARNING')
                std_out(f'Checking if there is a blueprint_url in hardware_description')
                if self.hardware_description is None:
                    std_out("Hardware description is not useful for blueprint", 'ERROR')
                    return None
                if 'blueprint_url' in self.hardware_description:
                    std_out(f"Trying postprocessing blueprint from:\n{self.hardware_description['blueprint_url']}")
                    nblueprint = basename(urlparse(self.hardware_description['blueprint_url']).path).split('.')[0]
                    tentative_urls = url_checker(self.hardware_description['blueprint_url'])
                    if len(tentative_urls)>0:
                        self.blueprint_url = tentative_urls[0]
                    else:
                        std_out('Invalid blueprint', 'ERROR')
                        return None
                else:
                    std_out('Postprocessing not possible without blueprint', 'ERROR')
                    return None

            std_out(f'Using hardware postprocessing blueprint: {nblueprint}')
            lblueprint = get_json_from_url(self.blueprint_url)

            if lblueprint is not None:
                self.blueprint = nblueprint
                self.blueprint_loaded_from_url = True
                self.set_blueprint_attrs(lblueprint)
                self.set_descriptor_attrs()
                std_out('Blueprint loaded from url', 'SUCCESS')
            else:
                std_out('Blueprint in url is not valid', 'ERROR')
                return None

        return self.api_device.postprocessing

    def __set_blueprint_attrs__(self, blueprintd):

        # Set attributes
        for bpitem in blueprintd:
            if bpitem not in vars(self):
                self.__setattr__(bpitem, blueprintd[bpitem])
            elif self.__getattribute__(bpitem) is None:
                self.__setattr__(bpitem, blueprintd[bpitem])

    def __set_params_attrs__(self, params):

        # Params attributes
        for param in params.keys():
            if param not in vars(self):
                std_out(f'Ignoring {param} from input')
                continue
            if type(self.__getattribute__(param)) == dict:
                self.__setattr__(param, dict_fmerge(self.__getattribute__(param), params[param]))
            else:
                self.__setattr__(param, params[param])

    # TODO
    def validate(self):
        if self.hardware_description is not None: return True
        else: return False
        return True
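    # A sketch of how attribute resolution composes in the new __init__,
    # assuming a blueprint that defines 'frequency' and a params dict that
    # overrides it (the id and frequency values are illustrative):
    # __set_params_attrs__ runs first and sets self.frequency; the later
    # __set_blueprint_attrs__ only fills attributes that are still None,
    # so the user-provided value wins over the blueprint default.
    #
    #   device = Device(blueprint = 'sck_21',
    #                   source = {'type': 'api'},
    #                   params = {'id': '16549', 'frequency': '5Min'})
    #   # device.frequency == '5Min' (from params), device.id == '16549'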
    def merge_sensor_metrics(self, ignore_empty = True):

        std_out('Merging sensor and metrics channels')

        all_channels = dict_fmerge(self.sensors, self.metrics)

        if ignore_empty:
            to_ignore = []
            for channel in all_channels.keys():
                if channel not in self.data: to_ignore.append(channel)
                elif self.data[channel].dropna().empty:
                    std_out(f'{channel} is empty')
                    to_ignore.append(channel)

            for channel in to_ignore: all_channels.pop(channel, None)

        return all_channels

    def add_metric(self, metric = dict()):
        '''
        Add a metric to the device to be processed by a callable function
        Parameters
        ----------
            metric: dict
            Empty dict
                Description of the metric to be added. It only adds it to
                Device.metrics, but does not calculate anything yet. The metric dict needs
                to follow the format:
                    metric = {
                                'metric_name': {'process': <function_name>,
                                                'args': <iterable>,
                                                'kwargs': <**kwargs for @function_name>,
                                                'from_list': <module to load function from>
                                }
                    }
                The 'from_list' parameter is optional, and only needed if the process is not
                already available in scdata.device.process.

                For a list of available processes call help(scdata.device.process)

                Example:
                --------
                    metric = {'NO2_CLEAN': {'process': 'clean_ts',
                                            'kwargs': {'name': pollutant,
                                                       'limits': [0, 350],
                                                       'window_size': 5}
                            }}
        Returns
        ----------
        True if added metric
        '''

        if 'metrics' not in vars(self):
            std_out(f'Device {self.id} has no metrics yet. Adding')
            self.metrics = dict()

        try:
            metricn = next(iter(metric.keys()))
            self.metrics[metricn] = metric[metricn]
        except StopIteration:
            # An empty metric dict was passed
            std_out('Empty metric dict, ignoring', 'ERROR')
            return False

        std_out(f'Metric {metric} added to metrics', 'SUCCESS')
        return True

    def del_metric(self, metricn = ''):
        if 'metrics' not in vars(self): return
        if metricn in self.metrics: self.metrics.pop(metricn, None)
        if metricn in self.data.columns: self.data.__delitem__(metricn)

        if metricn not in self.data and metricn not in self.metrics:
            std_out(f'Metric {metricn} removed from metrics', 'SUCCESS')
            return True
        return False

    # TODO Fix
    # async def load(self, options = dict(), path = '', convert_units = True, only_unprocessed = False, max_amount = None, follow_defaults = False):
    async def load(self, convert_units = True, only_unprocessed = False, max_amount = None, follow_defaults = False):
        '''
        Loads the device data with some options

        Parameters:
        -----------
        convert_units: bool
            Default: True
            Convert units for channels based on config._channel_lut
        only_unprocessed: bool
            Default: False
            Loads only unprocessed data
        max_amount: int
            Default: None
            Trim dataframe to this amount for processing and forwarding purposes
        follow_defaults: bool
            Default: False
            Use defaults from config._csv_defaults when loading csv files

        Returns
        ----------
            True if loaded correctly
        '''

        # # Add overrides if we have them, otherwise set device defaults
        # self.__check_overrides__(options)
        # std_out(f'Using options for device: {options}')

        if self.source['type'] == 'csv':
            # TODO Review if this is necessary
            if follow_defaults:
                index_name = config._csv_defaults['index_name']
                sep = config._csv_defaults['sep']
                skiprows = config._csv_defaults['skiprows']
            else:
                index_name = self.sources[self.source]['index']
                sep = self.sources[self.source]['sep']
                skiprows = 
self.sources[self.source]['header_skip'] + index_name = self.source['index_name'] + sep = self.source['sep'] + skiprows = self.source['skiprows'] + # TODO Change this for a csv handler # here we don't use tzaware because we only load preprocessed data try: - self.readings = self.readings.combine_first( - read_csv_file( - file_path = join(path, - self.processed_data_file), - timezone = self.timezone, - frequency = self.options['frequency'], - clean_na = self.options['clean_na'], - index_name = index_name, - sep = sep, - skiprows = skiprows, - resample = self.options['resample']) - ) + csv_data = read_csv_file( + file_path = join(path, self.processed_data_file), + timezone = self.timezone, + frequency = self.frequency, + clean_na = self.clean_na, + resample = self.resample, + index_name = index_name, + sep = sep, + skiprows = skiprows) except FileNotFoundError: std_out(f'File not found for device {self.id} in {path}', 'ERROR') - - if self.readings is not None: - self.__convert_names__() - self.__load_wrapup__(max_amount, convert_units) - - elif 'api' in self.source: - - # Get device location - # Location data should be standard for each new device - self.api_device.get_device_lat_long() - self.api_device.get_device_alt() - - self.location = { - 'longitude': self.api_device.long, - 'latitude': self.api_device.lat, - 'altitude': self.api_device.alt - } - - self.timezone = self.api_device.get_device_timezone() - - if path == '': - # Not chached case - if only_unprocessed: - - # Override dates for post-processing - if self.latest_postprocessing is not None: - hw_latest_postprocess = localise_date(self.latest_postprocessing, - 'UTC').strftime('%Y-%m-%dT%H:%M:%S') - # Override min loading date - self.options['min_date'] = hw_latest_postprocess - - df = self.api_device.get_device_data(self.options['min_date'], - self.options['max_date'], - self.options['frequency'], - self.options['clean_na'], - resample = self.options['resample']) - - # API Device is not aware of other csv index data, so make it here - if 'csv' in self.sources and df is not None: - df = df.reindex(df.index.rename(self.sources['csv']['index'])) - - # Combine it with readings if possible - if df is not None: - self.readings = self.readings.combine_first(df) - self.__load_wrapup__(max_amount, convert_units) - else: - # Cached case - try: - self.readings = self.readings.combine_first(read_csv_file(join(path, str(self.id) + '.csv'), - self.timezone, self.options['frequency'], - self.options['clean_na'], self.sources['csv']['index'], - resample = self.options['resample'])) - except FileNotFoundError: - std_out(f'No cached data file found for device {self.id} in {path}. 
Moving on', 'WARNING') - else: - self.__load_wrapup__(max_amount, convert_units) + if csv_data is not None: + self.data = self.data.combine_first(csv_data) + self.__convert_names__() + self.loaded = self.__load_wrapup__(max_amount, convert_units) + + elif self.source['type'] == 'api': + + if self.handler.method == 'async': + await self.handler.get_data( + min_date = self.min_date, + max_date = self.max_date, + freq = self.frequency, + clean_na = self.clean_na, + resample = self.resample, + only_unprocessed = only_unprocessed) + df = self.handler.data + else: + df = self.handler.get_data( + min_date = self.min_date, + max_date = self.max_date, + freq = self.frequency, + clean_na = self.clean_na, + resample = self.resample, + only_unprocessed = only_unprocessed) + + # Combine it with data if possible + if df is not None: + self.data = self.data.combine_first(df) + self.loaded = self.__load_wrapup__(max_amount, convert_units) + + elif self.source['type'] == 'kafka': + std_out('Not yet', 'ERROR') + raise NotImplementedError self.processed = False return self.loaded def __load_wrapup__(self, max_amount, convert_units): - if self.readings is not None: + if self.data is not None: self.__check_sensors__() - if not self.readings.empty: + if not self.data.empty: if max_amount is not None: + # TODO Dirty workaround std_out(f'Trimming dataframe to {max_amount} rows') - self.readings=self.readings.dropna(axis = 0, how='all').head(max_amount) + self.data=self.data.dropna(axis = 0, how='all').head(max_amount) # Only add metrics if there is something that can be potentially processed self.__fill_metrics__() - self.loaded = True - if convert_units: self.__convert_units__() + # Convert units + if convert_units: + self.__convert_units__() + return True else: - std_out('Empty dataframe in readings', 'WARNING') + std_out('Empty dataframe in data', 'WARNING') + return False def __fill_metrics__(self): std_out('Checking if metrics need to be added based on hardware info') - if self.hardware_description is None: + if self.handler.hardware_postprocessing is None: std_out(f'No hardware url in device {self.id}, ignoring') return None - # Now go through sensor versions and add them to the metrics - if 'versions' in self.hardware_description: - for version in self.hardware_description['versions']: - - from_date = version["from"] - to_date = version["to"] + for version in self.handler.hardware_postprocessing.versions: - # Do not add any metric if the from_date of the calibration is after the last_reading_at - # as there would be nothing to process - if from_date > self.api_device.last_reading_at: + # Do not add any metric if the from_date of the calibration is after the last_reading_at as there would be nothing to process + if version.from_date is not None: + if version.from_date > self.handler.last_reading_at: std_out('Postprocessing from_date is later than device last_reading_at', 'ERROR') return None - for slot in version["ids"]: - # Alphasense type - AAN 803-04 - if slot.startswith('AS'): - sensor_id = version["ids"][slot] - as_type = config._as_sensor_codes[sensor_id[0:3]] - channel = as_type[as_type.index('_')+1:] - pollutant = channel - if channel == 'OX': - pollutant = 'O3' - - # Get working and auxiliary electrode names - wen = f"ADC_{slot.strip('AS_')[:slot.index('_')]}_{slot.strip('AS_')[slot.index('_')+1]}" - aen = f"ADC_{slot.strip('AS_')[:slot.index('_')]}_{slot.strip('AS_')[slot.index('_')+2]}" - if pollutant not in self.metrics: - # Create Metric - std_out(f'Metric {pollutant} not in blueprint, 
ignoring.', 'WARNING') - else: - # Simply fill it up - std_out(f'{pollutant} found in blueprint metrics, filling up with hardware info') - self.metrics[pollutant]['kwargs']['we'] = wen - self.metrics[pollutant]['kwargs']['ae'] = aen - self.metrics[pollutant]['kwargs']['timezone'] = self.timezone - self.metrics[pollutant]['kwargs']['alphasense_id'] = str(sensor_id) - self.metrics[pollutant]['kwargs']['from_date'] = from_date - self.metrics[pollutant]['kwargs']['to_date'] = to_date - # Add channel name for traceability - self.metrics[f'{channel}_WE']['kwargs']['channel'] = wen - self.metrics[f'{channel}_AE']['kwargs']['channel'] = aen - - # Alphasense type - AAN 803-04 - if slot.startswith('PT'): - - sensor_id = version["ids"][slot] - - # Get working and auxiliary electrode names - pt1000plus = f"ADC_{slot.strip('PT_')[:slot.index('_')]}_{slot.strip('PT_')[slot.index('_')+1]}" - pt1000minus = f"ADC_{slot.strip('PT_')[:slot.index('_')]}_{slot.strip('PT_')[slot.index('_')+2]}" - - metric = 'ASPT1000' - if 'ASPT1000' not in self.metrics: - # Create Metric - std_out(f'Metric {metric} not in blueprint, ignoring.', 'WARNING') - else: - # Simply fill it up - std_out(f'{metric} found in blueprint metrics, filling up with hardware info') - self.metrics[metric]['kwargs']['pt1000plus'] = pt1000plus - self.metrics[metric]['kwargs']['pt1000minus'] = pt1000minus - self.metrics[metric]['kwargs']['afe_id'] = str(sensor_id) - self.metrics[metric]['kwargs']['timezone'] = self.timezone - self.metrics[metric]['kwargs']['from_date'] = from_date - self.metrics[metric]['kwargs']['to_date'] = to_date - # Add channel name for traceability - self.metrics[f'PT1000_POS']['kwargs']['channel'] = pt1000plus - - # Other metric types will go here - else: - std_out('No hardware versions found, ignoring additional metrics', 'WARNING') + metrics = self.handler.get_metrics(version) + for metric in metrics: + if metric not in self.metrics: + std_out(f'Metric {metric} not in blueprint, ignoring.', 'WARNING') + continue + self.metrics[metric]['kwargs'] = metrics[metric] def __check_sensors__(self): - remove_sensors = list() - # Remove sensor from the list if it's not in self.readings.columns + extra_sensors = list() + # Check sensors from the list that are not in self.data.columns for sensor in self.sensors: - if sensor not in self.readings.columns: - std_out(f'{sensor} not in readings columns. Marked for removal', 'INFO') - remove_sensors.append(sensor) - - if remove_sensors != []: - std_out(f'Removing sensors from device: {remove_sensors}', 'WARNING') - for sensor_to_remove in remove_sensors: - self.sensors.pop(sensor_to_remove, None) + if sensor not in self.data.columns: + std_out(f'{sensor} not in data columns', 'INFO') + extra_sensors.append(sensor) extra_columns = list() - for column in self.readings.columns: - if column not in self.sensors: extra_columns.append(column) - std_out(f'Data contains extra columns: {extra_columns}', 'INFO') + # Check columns from the data that are not in self.sensors + for column in self.data.columns: + if column not in self.sensors: + extra_columns.append(column) + std_out(f'Data contains extra columns: {extra_columns}', 'INFO') if config.data['strict_load']: std_out(f"config.data['strict_load'] is enabled. 
Removing extra columns") - self.readings.drop(extra_columns, axis=1, inplace=True) + if extra_sensors != []: + std_out(f'Removing sensors from device.sensors: {extra_sensors}', 'WARNING') + for sensor_to_remove in extra_sensors: + self.sensors.pop(sensor_to_remove, None) + if extra_columns != []: + self.data.drop(extra_columns, axis=1, inplace=True) else: std_out(f"config.data['strict_load'] is disabled. Ignoring extra columns") std_out(f'Device sensors after checks: {list(self.sensors.keys())}') + # TODO Check def __convert_names__(self): rename = dict() for sensor in self.sensors: if 'id' in self.sensors[sensor]: - if self.sensors[sensor]['id'] in self.readings.columns: + if self.sensors[sensor]['id'] in self.data.columns: rename[self.sensors[sensor]['id']] = sensor - self.readings.rename(columns=rename, inplace=True) + self.data.rename(columns=rename, inplace=True) def __convert_units__(self): ''' Convert the units based on the UNIT_LUT and blueprint NB: what is read/written from/to the cache is not converted. The files are with original units, and then converted in the device only - for the readings but never chached like so. + for the data but never chached like so. ''' std_out('Checking if units need to be converted') for sensor in self.sensors: factor = get_units_convf(sensor, from_units = self.sensors[sensor]['units']) if factor != 1: - self.readings.rename(columns={sensor: sensor + '_in_' + self.sensors[sensor]['units']}, inplace=True) - self.readings.loc[:, sensor] = self.readings.loc[:, sensor + '_in_' + self.sensors[sensor]['units']]*factor + self.data.rename(columns={sensor: sensor + '_in_' + self.sensors[sensor]['units']}, inplace=True) + self.data.loc[:, sensor] = self.data.loc[:, sensor + '_in_' + self.sensors[sensor]['units']]*factor std_out('Units check done', 'SUCCESS') + # TODO Check def process(self, only_new = False, lmetrics = None): ''' Processes devices metrics, either added by the blueprint definition @@ -560,7 +421,7 @@ def process(self, only_new = False, lmetrics = None): ---------- only_new: boolean False - To process or not the existing channels in the Device.readings that are + To process or not the existing channels in the Device.data that are defined in Device.metrics lmetrics: list None @@ -587,7 +448,7 @@ def process(self, only_new = False, lmetrics = None): std_out(f'---') std_out(f'Processing {metric}') - if only_new and metric in self.readings: + if only_new and metric in self.data: std_out(f'Skipping. 
Already in device') continue @@ -595,6 +456,7 @@ def process(self, only_new = False, lmetrics = None): if 'from_list' in metrics[metric]: lazy_name = metrics[metric]['from_list'] else: + # TODO make this open lazy_name = f"scdata.device.process.{metrics[metric]['process']}" try: @@ -610,7 +472,7 @@ def process(self, only_new = False, lmetrics = None): if 'kwargs' in metrics[metric]: kwargs = metrics[metric]['kwargs'] try: - result = funct(self.readings, *args, **kwargs) + result = funct(self.data, *args, **kwargs) except KeyError: # print_exc() std_out('Metric args not in dataframe', 'ERROR') @@ -618,7 +480,7 @@ def process(self, only_new = False, lmetrics = None): pass else: if result is not None: - self.readings[metric] = result + self.data[metric] = result process_ok &= True # If the metric is None, might be for many reasons and shouldn't collapse # the process_ok @@ -632,25 +494,57 @@ def process(self, only_new = False, lmetrics = None): return self.processed + # TODO + def checks(self, level): + ''' + Device checks + ''' + # TODO Make checks dependent on each handler + if self.source == 'api': + # TODO normalise the functions accross all handlers + # Check status code from curl + response = self.api_device.checks() + response['status'] = 200 + + return response + + # TODO Check def update_latest_postprocessing(self): # Sets latest postprocessing to latest reading if self.source == 'api': + # TODO + # Deprecation + if self.source['handler'] == 'ScApiDevice': if self.api_device.get_device_postprocessing() is not None: std_out('Updating postprocessing') # Add latest postprocessing rounded up with # frequency so that we don't end up in # and endless loop processing only the latest data line - # (minute vs. second precission of the readings) - self.latest_postprocessing = localise_date(self.readings.index[-1]+\ + # (minute vs. second precission of the data) + self.latest_postprocessing = localise_date(self.data.index[-1]+\ to_timedelta(self.options['frequency']), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') self.api_device.postprocessing['latest_postprocessing'] = self.latest_postprocessing std_out(f"Updated latest_postprocessing to: {self.api_device.postprocessing['latest_postprocessing']}") + return True + else: + if self.api_device.json.postprocessing.id is not None: + std_out('Updating postprocessing') + # Add latest postprocessing rounded up with + # frequency so that we don't end up in + # and endless loop processing only the latest data line + # (minute vs. second precission of the data) + self.latest_postprocessing = localise_date(self.data.index[-1]+\ + to_timedelta(self.options['frequency']), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') + self.api_device.json.postprocessing.latest_postprocessing = self.latest_postprocessing + std_out(f"Updated latest_postprocessing to: {self.api_device.json.postprocessing['latest_postprocessing']}") + return True return False + # TODO Check def forward(self, chunk_size = 500, dry_run = False, max_retries = 2): ''' Forwards data to another api. @@ -719,7 +613,7 @@ def forward(self, chunk_size = 500, dry_run = False, max_retries = 2): is {self.forwarding_params}. 
Updating') if self.forwarding_params is not None: - df = self.readings.copy() + df = self.data.copy() df = df[df.columns.intersection(list(self.merge_sensor_metrics(ignore_empty=True).keys()))] df = clean(df, 'drop', how = 'all') @@ -745,67 +639,9 @@ def forward(self, chunk_size = 500, dry_run = False, max_retries = 2): std_out('Empty forwarding information', 'ERROR') return False - def add_metric(self, metric = dict()): - ''' - Add a metric to the device to be processed by a callable function - Parameters - ---------- - metric: dict - Empty dict - Description of the metric to be added. It only adds it to - Device.metrics, but does not calculate anything yet. The metric dict needs - to follow the format: - metric = { - 'metric_name': {'process': - 'args': - 'kwargs': <**kwargs for @function_name> - 'from_list': - } - } - The 'from_list' parameter is optional, and onle needed if the process is not - already available in scdata.device.process. - - For a list of available processes call help(scdata.device.process) - - Example: - -------- - metric = {'NO2_CLEAN': {'process': 'clean_ts', - 'kwargs': {'name': pollutant, - 'limits': [0, 350], - 'window_size': 5} - }} - Returns - ---------- - True if added metric - ''' - - if 'metrics' not in vars(self): - std_out(f'Device {self.id} has no metrics yet. Adding') - self.metrics = dict() - - try: - metricn = next(iter(metric.keys())) - self.metrics[metricn] = metric[metricn] - except: - print_exc() - return False - - std_out(f'Metric {metric} added to metrics', 'SUCCESS') - return True - - def del_metric(self, metricn = ''): - if 'metrics' not in vars(self): return - if metricn in self.metrics: self.metrics.pop(metricn, None) - if metricn in self.readings.columns: self.readings.__delitem__(metricn) - - if metricn not in self.readings and metricn not in self.metrics: - std_out(f'Metric {metricn} removed from metrics', 'SUCCESS') - return True - return False - def export(self, path, forced_overwrite = False, file_format = 'csv'): ''' - Exports Device.readings to file + Exports Device.data to file Parameters ---------- path: String @@ -823,11 +659,12 @@ def export(self, path, forced_overwrite = False, file_format = 'csv'): ''' # Export device if file_format == 'csv': - return export_csv_file(path, str(self.id), self.readings, forced_overwrite = forced_overwrite) + return export_csv_file(path, str(self.id), self.data, forced_overwrite = forced_overwrite) else: std_out('Not supported format' ,'ERROR') return False + # TODO Check def post_sensors(self, clean_na = 'drop', chunk_size = 500, dry_run = False, max_retries = 2): ''' Posts devices sensors. Only available for parent of ScApiDevice @@ -837,7 +674,7 @@ def post_sensors(self, clean_na = 'drop', chunk_size = 500, dry_run = False, max 'drop' 'drop', 'fill' chunk_size: integer - chunk size to split resulting pandas DataFrame for posting readings + chunk size to split resulting pandas DataFrame for posting data dry_run: boolean False Post the payload to the API or just return it @@ -851,13 +688,10 @@ def post_sensors(self, clean_na = 'drop', chunk_size = 500, dry_run = False, max ''' post_ok = True - if self.sources[self.source]['handler'] != 'ScApiDevice': - std_out('Only supported processing post is to SmartCitizen API', 'ERROR') - return False rd = dict() - df = self.readings.copy().dropna(axis = 0, how='all') - for col in self.readings: + df = self.data.copy().dropna(axis = 0, how='all') + for col in self.data: if col not in self.sensors: std_out(f'Column ({col}) not in recognised IDs. 
Ignoring', 'WARNING') df.drop(col, axis=1, inplace=True) @@ -871,13 +705,14 @@ def post_sensors(self, clean_na = 'drop', chunk_size = 500, dry_run = False, max return False std_out(f'Trying to post {list(df.columns)}') - post_ok = self.api_device.post_data_to_device(df, clean_na = clean_na, + post_ok = self.handler.post_data_to_device(df, clean_na = clean_na, chunk_size = chunk_size, dry_run = dry_run, max_retries = max_retries) if post_ok: std_out(f'Posted data for {self.id}', 'SUCCESS') else: std_out(f'Error posting data for {self.id}', 'ERROR') return post_ok + # TODO Check def update_postprocessing(self, dry_run = False): ''' Posts device postprocessing. Only available for parent of ScApiDevice @@ -897,6 +732,7 @@ def update_postprocessing(self, dry_run = False): if post_ok: std_out(f"Postprocessing posted for device {self.id}", "SUCCESS") return post_ok + # TODO Check def post_metrics(self, with_postprocessing = False, chunk_size = 500, dry_run = False, max_retries = 2): ''' Posts devices metrics. Only available for parent of ScApiDevice @@ -906,7 +742,7 @@ def post_metrics(self, with_postprocessing = False, chunk_size = 500, dry_run = False Post the postprocessing_attributes too chunk_size: integer - chunk size to split resulting pandas DataFrame for posting readings + chunk size to split resulting pandas DataFrame for posting data dry_run: boolean False Post the payload to the API or just return it @@ -927,11 +763,11 @@ def post_metrics(self, with_postprocessing = False, chunk_size = 500, dry_run = rd = dict() std_out(f"Posting metrics for device {self.id}") # Make a copy of df - df = self.readings.copy().dropna(axis = 0, how='all') + df = self.data.copy().dropna(axis = 0, how='all') # Get metrics to post, only the ones that have True in 'post' field and a valid ID # Replace their name with the ID to post for metric in self.metrics: - if self.metrics[metric]['post'] == True and metric in self.readings.columns: + if self.metrics[metric]['post'] == True and metric in self.data.columns: std_out(f"Adding {metric} for device {self.id} (ID: {self.metrics[metric]['id']})") rd[metric] = self.metrics[metric]['id'] @@ -947,8 +783,7 @@ def post_metrics(self, with_postprocessing = False, chunk_size = 500, dry_run = return False std_out(f'Trying to post {list(df.columns)}') - post_ok = self.api_device.post_data_to_device(df, chunk_size = chunk_size, - dry_run = dry_run, max_retries = max_retries) + post_ok = self.api_device.post_data_to_device(df, chunk_size = chunk_size, dry_run = dry_run, max_retries = max_retries) if post_ok: std_out(f'Posted metrics for {self.id}', 'SUCCESS') else: std_out(f'Error posting metrics for {self.id}', 'ERROR') From 992ecd84c1a43b13229a26b07297ae6a436191cc Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Wed, 29 Nov 2023 18:50:37 +0100 Subject: [PATCH 03/72] Add new air quality blueprint --- blueprints/sc_air.json | 391 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 391 insertions(+) create mode 100644 blueprints/sc_air.json diff --git a/blueprints/sc_air.json b/blueprints/sc_air.json new file mode 100644 index 00000000..0acffd5c --- /dev/null +++ b/blueprints/sc_air.json @@ -0,0 +1,391 @@ +{ + "documentation": "https://docs.smartcitizen.me/", + "id": null, + "clean_na": null, + "timezone": null, + "frequency": null, + "max_date": null, + "min_date": null, + "resample": false, + "source": null, + "metrics": [ + { + "name": "CCS811_ECO2_CLEAN", + "description": "eCO2 cleaned data", + "kwargs": { + "limits": [ + 400, + 65000 + ], + "channel": "CCS811_ECO2", 
+ "window_size": 5, + "window_type": null + }, + "process": "clean_ts", + "units": "ppm", + "post": false, + "id": null + }, + { + "name":"CCS811_VOCS_CLEAN", + "description": "Volatile Organic Compounds cleaned data", + "kwargs": { + "limits": [ + 0, + 65000 + ], + "channel": "CCS811_VOCS", + "window_size": 5, + "window_type": null + }, + "process": "clean_ts", + "units": "ppb", + "post": false, + "id": null + }, + { + "name": "EXT_PM_10_CLEAN", + "description": "PM10 calculated based on both PMS5003 PM10 inputs", + "kwargs": { + "factor": 0.3, + "limits": [ + 0, + 1000 + ], + "channels": [ + "EXT_PM_A_10", + "EXT_PM_B_10" + ], + "pick": "min", + "window_size": 5, + "window_type": null + }, + "process": "merge_ts", + "units": "ug/m3", + "post": true, + "id": 88 + }, + { + "name":"EXT_PM_1_CLEAN", + "description": "PM1 calculated based on both PMS5003 PM1 inputs", + "kwargs": { + "factor": 0.3, + "limits": [ + 0, + 1000 + ], + "channels": [ + "EXT_PM_A_1", + "EXT_PM_B_1" + ], + "pick": "min", + "window_size": 5, + "window_type": null + }, + "process": "merge_ts", + "units": "ug/m3", + "post": true, + "id": 89 + }, + { + "name": "EXT_PM_25_CLEAN", + "description": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", + "kwargs": { + "factor": 0.3, + "limits": [ + 0, + 1000 + ], + "channels": [ + "EXT_PM_A_25", + "EXT_PM_B_25" + ], + "pick": "min", + "window_size": 5, + "window_type": null + }, + "process": "merge_ts", + "units": "ug/m3", + "post": true, + "id": 87 + }, + { + "name": "PT1000_POS", + "description": "PT1000 raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "ASPT1000", + "description": "PT1000 temperature calculation in AFE", + "id": null, + "kwargs": { + "pt1000minus": null, + "pt1000plus": null, + "afe_id": null + }, + "post": false, + "process": "alphasense_pt1000", + "units": "degC" + }, + { + "name": "EC_SENSOR_TEMP", + "description": "Electrochemical sensor temperature", + "id": null, + "kwargs": { + "priority": "ASPT1000" + }, + "post": false, + "process": "ec_sensor_temp", + "units": "degC" + }, + { + "name": "CO_WE", + "description": "CO working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "CO_AE", + "description": "CO auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "NO2_WE", + "description": "NO2 working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "NO2_AE", + "description": "NO2 auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "NO_WE", + "description": "NO working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "NO_AE", + "description": "NO auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "SO2_WE", + "description": "SO2 working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "SO2_AE", + "description": "SO2 
auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "H2S_WE", + "description": "H2S working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "H2S_AE", + "description": "H2S auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "OX_WE", + "description": "OX working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name": "OX_AE", + "description": "OX auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "process": "channel_names", + "units": "V" + }, + { + "name":"CO", + "description": "Calculation of CO based on AAN 803-04", + "id": 152, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "process": "alphasense_803_04", + "units": "ppb" + }, + { + "name":"NO2", + "description": "Calculation of NO2 based on AAN 803-04", + "id": 153, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "process": "alphasense_803_04", + "units": "ppb" + }, + { + "name":"O3", + "description": "Calculation of O3 based on AAN 803-04", + "id": 157, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "process": "alphasense_803_04", + "units": "ppb" + }, + { + "name":"SO2", + "description": "Calculation of SO2 based on AAN 803-04", + "id": 155, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null, + "use_alternative": true + }, + "post": true, + "process": "alphasense_803_04", + "units": "ppb" + }, +{ + "name": "NO", + "description": "Calculation of NO based on AAN 803-04", + "id": 154, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "process": "alphasense_803_04", + "units": "ppb" + }, + { + "name": "H2S", + "description": "Calculation of H2S based on AAN 803-04", + "id": 156, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "process": "alphasense_803_04", + "units": "ppb" + } + ], + "sources": [ + { + "type": "api", + "handler": "SCDevice", + "module": "smartcitizen_connector" + }, + { + "type": "csv", + "module": "scdata.io.csv", + "handler": "csv_handler", + "params": { + "header_skip": [ + 1, + 2, + 3 + ], + "index": "TIME", + "sep": ",", + "tz-aware": true + }, + "files": { + "processed-data-file": null, + "raw-data-file": null + } + } + ] +} From 616a830c88c5848be0604c42981e4817111c577a Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Wed, 29 Nov 2023 18:54:10 +0100 Subject: [PATCH 04/72] Test for new aq blueprint --- hardware/SCAS210099.json | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/hardware/SCAS210099.json b/hardware/SCAS210099.json index cec34496..9f5bc5e3 100644 --- a/hardware/SCAS210099.json +++ b/hardware/SCAS210099.json @@ -1,13 +1,12 @@ { - "blueprint_url": "https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/master/blueprints/sc_21_station_module.json", - "description": "1PMS5003-2ELEC-AFE", - "forwarding": "nilu", + "blueprint_url": 
"https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/blueprints/sc_air.json", + "description": "2PMS5003-2ELEC-AFE", "versions": [ { "ids": { - "AS_48_32": "132070362", - "AS_49_10": "212070552", - "PT_49_23": "12-000445" + "AS_48_32": "214240826", + "AS_49_10": "212741106", + "PT_49_23": "10-001227" }, "from": "2021-01-21", "to": null From eff075407d040447d0ca97f82b88b36c005f2a9f Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sat, 2 Dec 2023 08:52:28 +0100 Subject: [PATCH 05/72] Improve alphasense processing and cleanup --- scdata/_config/config.py | 23 +++++++ scdata/device/process/alphasense.py | 99 ++++++++--------------------- 2 files changed, 51 insertions(+), 71 deletions(-) diff --git a/scdata/_config/config.py b/scdata/_config/config.py index 84de880e..3f67bf93 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -177,6 +177,29 @@ class Config(object): 'GPS_HDOP': [-inf, 0, 40, 80, 120, 160, 200, 240, 260, 300, inf] } + _as_sensor_codes = { + '132': 'ASA4_CO', + '133': 'ASA4_H2S', + '130': 'ASA4_NO', + '212': 'ASA4_NO2', + '214': 'ASA4_OX', + '134': 'ASA4_SO2', + '162': 'ASB4_CO', + '133': 'ASB4_H2S',# + '130': 'ASB4_NO', # + '202': 'ASB4_NO2', + '204': 'ASB4_OX', + '164': 'ASB4_SO2' + } + + # Alphasense temperature channels (in order of priority) + _as_temp_channel = [ + "ASPT1000", + "SHT31_EXT_TEMP", + "SHT35_EXT_TEMP", + "PM_DALLAS_TEMP", + ] + _channel_bin_n = 11 # Molecular weights of certain pollutants for unit convertion diff --git a/scdata/device/process/alphasense.py b/scdata/device/process/alphasense.py index c0cc59c9..ce5c0dd1 100644 --- a/scdata/device/process/alphasense.py +++ b/scdata/device/process/alphasense.py @@ -13,10 +13,6 @@ def alphasense_803_04(dataframe, **kwargs): based on AAN803-04 Parameters ---------- - from_date: string, datetime object - Date from which this calibration id is valid from - to_date: string, datetime object - Date until which this calibration id is valid to. 
None if current alphasense_id: string Alphasense sensor ID (must be in calibrations.json) we: string @@ -28,8 +24,6 @@ def alphasense_803_04(dataframe, **kwargs): use_alternative: boolean Default false Use alternative algorithm as shown in the AAN - timezone: string - Valid timezone for date localisation Returns ------- calculation of pollutant in ppb @@ -73,26 +67,8 @@ def comp_t(x, comp_lut): std_out(f"Sensor {kwargs['alphasense_id']} not in calibration data", 'ERROR') return None - # Process input dates - if 'from_date' not in kwargs: from_date = None - else: - if 'timezone' not in kwargs: - std_out('Cannot localise date without timezone') - return None - from_date = localise_date(kwargs['from_date'], kwargs['timezone']) - - if 'to_date' not in kwargs: to_date = None - else: - if 'timezone' not in kwargs: - std_out('Cannot localise date without timezone') - return None - to_date = localise_date(kwargs['to_date'], kwargs['timezone']) - # Make copy df = dataframe.copy() - # Trim data - if from_date is not None: df = df[df.index > from_date] - if to_date is not None: df = df[df.index < to_date] # Get sensor type as_type = config._as_sensor_codes[kwargs['alphasense_id'][0:3]] @@ -119,7 +95,7 @@ def comp_t(x, comp_lut): # Remove spurious voltages (0V < electrode < 5V) for electrode in ['we', 'ae']: - subkwargs = {'name': kwargs[electrode], + subkwargs = {'name': kwargs[electrode], 'limits': (0, 5), # In V 'window_size': None } @@ -172,9 +148,8 @@ def ec_sensor_temp(dataframe, **kwargs): """ if 'priority' in kwargs: if kwargs['priority'] in dataframe.columns: return dataframe[kwargs['priority']] - if 'ASPT1000' in dataframe.columns: return dataframe['ASPT1000'] - if 'PM_DALLAS_TEMP' in dataframe.columns: return dataframe['PM_DALLAS_TEMP'] - if 'SHT31_EXT_TEMP' in dataframe.columns: return dataframe['SHT31_EXT_TEMP'] + for option in config._as_temp_channel: + if option in dataframe.columns: return dataframe[option] std_out('Problem with input data', 'ERROR') return None @@ -218,21 +193,6 @@ def alphasense_pt1000(dataframe, **kwargs): std_out(f"AFE {kwargs['afe_id']} not in calibration data", 'ERROR') return None - # Process input dates - if 'from_date' not in kwargs: from_date = None - else: - if 'timezone' not in kwargs: - std_out('Cannot localise date without timezone', 'ERROR') - return None - from_date = localise_date(kwargs['from_date'], kwargs['timezone']) - - if 'to_date' not in kwargs: to_date = None - else: - if 'timezone' not in kwargs: - std_out('Cannot localise date without timezone', 'ERROR') - return None - to_date = localise_date(kwargs['to_date'], kwargs['timezone']) - # Retrieve calibration data - verify its all float cal_data = config.calibrations[kwargs['afe_id']] for item in cal_data: @@ -245,9 +205,6 @@ def alphasense_pt1000(dataframe, **kwargs): # Make copy df = dataframe.copy() - # Trim data - if from_date is not None: df = df[df.index > from_date] - if to_date is not None: df = df[df.index < to_date] # Calculate temperature df['v20'] = cal_data['v20'] - (cal_data['t20'] - 20.0) / 1000.0 @@ -297,7 +254,7 @@ def basic_4electrode_alg(dataframe, **kwargs): Name of working electrode found in dataframe auxiliary: string Name of auxiliary electrode found in dataframe - id: int + id: int Sensor ID pollutant: string Pollutant name. 
Must be included in the corresponding LUTs for unit convertion and additional parameters: @@ -314,12 +271,12 @@ def basic_4electrode_alg(dataframe, **kwargs): if 'id' not in kwargs: flag_error = True if 'pollutant' not in kwargs: flag_error = True - if flag_error: + if flag_error: std_out('Problem with input data', 'ERROR') return None # Get Sensor data - if kwargs['id'] not in config.calibrations: + if kwargs['id'] not in config.calibrations: std_out(f"Sensor {kwargs['id']} not in calibration data", 'ERROR') return None @@ -328,14 +285,14 @@ def basic_4electrode_alg(dataframe, **kwargs): sensor_type = config.calibrations[kwargs['id']]['sensor_type'] nWA = config.calibrations[kwargs['id']]['we_sensor_zero_mv']/config.calibrations[kwargs['id']]['ae_sensor_zero_mv'] - if sensor_type != kwargs['pollutant']: + if sensor_type != kwargs['pollutant']: std_out(f"Sensor {kwargs['id']} doesn't coincide with calibration data", 'ERROR') return None # This is always in ppm since the calibration data is in signal/ppm if kwargs['hardware'] == 'alphadelta': current_factor = alphadelta_pcb elif kwargs['hardware'] == 'isb': current_factor = 1 #TODO make it so we talk in mV - else: + else: std_out(f"Measurement hardware {kwargs['hardware']} not supported", 'ERROR') return None @@ -352,7 +309,7 @@ def baseline_4electrode_alg(dataframe, **kwargs): """ Calculates pollutant concentration based on 4 electrode sensor readings (mV), but using one of the metrics (baseline) as a baseline of the others. It uses the baseline correction algorithm - explained here: + explained here: https://docs.smartcitizen.me/Components/sensors/Electrochemical%20Sensors/#baseline-correction-based-on-temperature and the calibration ID. It adds a configurable background concentration. Parameters @@ -361,7 +318,7 @@ def baseline_4electrode_alg(dataframe, **kwargs): Name of working electrode found in dataframe baseline: string Name of auxiliary electrode found in dataframe - id: int + id: int Sensor ID pollutant: string Pollutant name. 
Must be included in the corresponding LUTs for unit convertion and additional parameters: @@ -378,7 +335,7 @@ def baseline_4electrode_alg(dataframe, **kwargs): Whether or not to store the baseline in the dataframe resample: str '1Min' - Resample frequency for the target dataframe + Resample frequency for the target dataframe pcb_factor: int alphadelta_pcb (6.36) Factor converting mV to nA due to the board configuration @@ -396,13 +353,13 @@ def baseline_4electrode_alg(dataframe, **kwargs): if 'baseline' not in kwargs: flag_error = True if 'id' not in kwargs: flag_error = True if 'pollutant' not in kwargs: flag_error = True - - if 'regression_type' in kwargs: + + if 'regression_type' in kwargs: if kwargs['regression_type'] not in ['best', 'exponential', 'linear']: flag_error = True else: reg_type = kwargs['regression_type'] else: reg_type = 'best' - - if 'period' in kwargs: + + if 'period' in kwargs: if kwargs['period'] not in ['best', 'exponential', 'linear']: flag_error = True else: period = kwargs['period'] else: period = '1D' @@ -411,18 +368,18 @@ def baseline_4electrode_alg(dataframe, **kwargs): else: store_baseline = True if 'resample' in kwargs: resample = kwargs['resample'] - else: resample = '1Min' + else: resample = '1Min' if 'pcb_factor' in kwargs: pcb_factor = kwargs['pcb_factor'] else: pcb_factor = alphadelta_pcb - + if 'baseline_type' in kwargs: baseline_type = kwargs['baseline_type'] else: baseline_type = 'deltas' if 'deltas' in kwargs: deltas = kwargs['deltas'] else: deltas = baseline_deltas - - if flag_error: + + if flag_error: std_out('Problem with input data', 'ERROR') return None @@ -443,17 +400,17 @@ def baseline_4electrode_alg(dataframe, **kwargs): target_2 = config.calibrations.loc[kwargs['id'],'target_2'] nWA = config.calibrations.loc[kwargs['id'],'w_zero_current']/config.calibrations.loc[kwargs['id'],'aux_zero_current'] - if target_1 != kwargs['pollutant']: + if target_1 != kwargs['pollutant']: std_out(f"Sensor {kwargs['id']} doesn't coincide with calibration data", 'ERROR') return None - + result = pcb_factor*(dataframe[kwargs['target']] - baseline)/abs(sensitivity_1) # Convert units result *= get_units_convf(kwargs['pollutant'], from_units = 'ppm') # Add Background concentration result += background_conc[kwargs['pollutant']] - + else: # Calculate non convolved part result = dataframe[kwargs['target']] - baseline @@ -461,7 +418,7 @@ def baseline_4electrode_alg(dataframe, **kwargs): # Make use of DataFrame inmutable properties to store in it the baseline if store_baseline: dataframe[kwargs['target']+'_BASELINE'] = baseline - + return result def deconvolution(dataframe, **kwargs): @@ -474,7 +431,7 @@ def deconvolution(dataframe, **kwargs): Name of convolved metric containing both pollutants (such as NO2+O3) base: string Name of one of the already deconvolved pollutants (for instance NO2) - id: int + id: int Sensor ID pollutant: string Pollutant name. 
Must be included in the corresponding LUTs for unit convertion and additional parameters: @@ -493,7 +450,7 @@ def deconvolution(dataframe, **kwargs): if 'id' not in kwargs: flag_error = True if 'pollutant' not in kwargs: flag_error = True - if flag_error: + if flag_error: std_out('Problem with input data', 'ERROR') return None @@ -503,7 +460,7 @@ def deconvolution(dataframe, **kwargs): target_2 = config.calibrations.loc[kwargs['id'],'target_2'] nWA = config.calibrations.loc[kwargs['id'],'w_zero_current']/config.calibrations.loc[kwargs['id'],'aux_zero_current'] - if target_1 != kwargs['pollutant']: + if target_1 != kwargs['pollutant']: std_out(f"Sensor {kwargs['id']} doesn't coincide with calibration data", 'ERROR') return None @@ -511,8 +468,8 @@ def deconvolution(dataframe, **kwargs): factor_unit_2 = get_units_convf(kwargs['base'], from_units = 'ppm') result = factor_unit_1*(alphadelta_pcb*dataframe[kwargs['source']] - dataframe[kwargs['base']]/factor_unit_2*abs(sensitivity_2))/abs(sensitivity_1) - + # Add Background concentration result += background_conc[kwargs['pollutant']] - + return result From 8ccb2e4e28958ccb13451d7dfacede21358b92a0 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 4 Dec 2023 10:43:31 +0100 Subject: [PATCH 06/72] Further updates in processing, connect handler --- .gitignore | 4 ++ scdata/device/__init__.py | 124 ++++++++++++++------------------------ 2 files changed, 50 insertions(+), 78 deletions(-) diff --git a/.gitignore b/.gitignore index bf5de1c5..dc0a0ebf 100644 --- a/.gitignore +++ b/.gitignore @@ -152,3 +152,7 @@ ENV/ # pytest .pytest_cache/ + +tests/local + +scripts/ \ No newline at end of file diff --git a/scdata/device/__init__.py b/scdata/device/__init__.py index b6dbd1ce..ba588b07 100644 --- a/scdata/device/__init__.py +++ b/scdata/device/__init__.py @@ -70,20 +70,25 @@ def __init__(self, blueprint = None, source=dict(), params=dict()): # Set blueprint if blueprint is not None: self.blueprint = blueprint + if self.blueprint not in config.blueprints: + raise ValueError(f'Specified blueprint {self.blueprint} is not in available blueprints') + self.__set_blueprint_attrs__(config.blueprints[self.blueprint]) else: if url_checker(self.handler.blueprint_url): std_out(f'Loading postprocessing blueprint from:\n{self.handler.blueprint_url}') self.blueprint = basename(urlparse(self.handler.blueprint_url).path).split('.')[0] + else: + raise ValueError(f'Specified blueprint url {self.handler.blueprint_url} is not valid') + self.__set_blueprint_attrs__(self.handler.blueprint) - if self.blueprint not in config.blueprints: - raise ValueError(f'Specified blueprint {self.blueprint} is not in available blueprints') - - self.__set_blueprint_attrs__(config.blueprints[self.blueprint]) + # TODO Remove + # self.__fill_handler_metrics__() # Init the rest of the stuff self.data = DataFrame() self.loaded = False self.processed = False + self.postprocessing_updated = False std_out(f'Device {self.id} initialised', 'SUCCESS') def __set_handler__(self): @@ -211,8 +216,6 @@ def del_metric(self, metricn = ''): return True return False - # TODO Fix - # async def load(self, options = dict(), path = '', convert_units = True, only_unprocessed = False, max_amount = None, follow_defaults = False): async def load(self, convert_units = True, only_unprocessed = False, max_amount = None, follow_defaults = False): ''' Loads the device with some options @@ -294,20 +297,19 @@ async def load(self, convert_units = True, only_unprocessed = False, max_amount clean_na = self.clean_na, resample = 
self.resample, only_unprocessed = only_unprocessed) - df = self.handler.data else: - df = self.handler.get_data( + self.handler.get_data( min_date = self.min_date, max_date = self.max_date, freq = self.frequency, clean_na = self.clean_na, resample = self.resample, only_unprocessed = only_unprocessed) + # In principle this makes both dataframes as they are unmutable + self.data = self.handler.data - # Combine it with data if possible - if df is not None: - self.data = self.data.combine_first(df) - self.loaded = self.__load_wrapup__(max_amount, convert_units) + # Wrap it all up + self.loaded = self.__load_wrapup__(max_amount, convert_units) elif self.source['type'] == 'kafka': std_out('Not yet', 'ERROR') @@ -324,37 +326,31 @@ def __load_wrapup__(self, max_amount, convert_units): # TODO Dirty workaround std_out(f'Trimming dataframe to {max_amount} rows') self.data=self.data.dropna(axis = 0, how='all').head(max_amount) - # Only add metrics if there is something that can be potentially processed - self.__fill_metrics__() # Convert units if convert_units: self.__convert_units__() + self.postprocessing_updated = False return True else: std_out('Empty dataframe in data', 'WARNING') return False + else: + return False - def __fill_metrics__(self): + # TODO remove + def __fill_handler_metrics__(self): std_out('Checking if metrics need to be added based on hardware info') if self.handler.hardware_postprocessing is None: std_out(f'No hardware url in device {self.id}, ignoring') return None - for version in self.handler.hardware_postprocessing.versions: - - # Do not add any metric if the from_date of the calibration is after the last_reading_at as there would be nothing to process - if version.from_date is not None: - if version.from_date > self.handler.last_reading_at: - std_out('Postprocessing from_date is later than device last_reading_at', 'ERROR') - return None - - metrics = self.handler.get_metrics(version) - for metric in metrics: - if metric not in self.metrics: - std_out(f'Metric {metric} not in blueprint, ignoring.', 'WARNING') - continue - self.metrics[metric]['kwargs'] = metrics[metric] + for metric in self.handler.metrics: + metricn = next(iter(metric)) + if metricn not in self.metrics: + std_out(f'Metric {metricn} from handler not in blueprint, ignoring.', 'WARNING') + continue + self.metrics[metricn]['kwargs'] = metric[metricn]['kwargs'] def __check_sensors__(self): @@ -385,13 +381,14 @@ def __check_sensors__(self): std_out(f'Device sensors after checks: {list(self.sensors.keys())}') - # TODO Check def __convert_names__(self): rename = dict() for sensor in self.sensors: if 'id' in self.sensors[sensor]: if self.sensors[sensor]['id'] in self.data.columns: rename[self.sensors[sensor]['id']] = sensor + else: + std_out(f'No id in {self.sensors[sensor]}', 'WARNING') self.data.rename(columns=rename, inplace=True) def __convert_units__(self): @@ -410,7 +407,6 @@ def __convert_units__(self): self.data.loc[:, sensor] = self.data.loc[:, sensor + '_in_' + self.sensors[sensor]['units']]*factor std_out('Units check done', 'SUCCESS') - # TODO Check def process(self, only_new = False, lmetrics = None): ''' Processes devices metrics, either added by the blueprint definition @@ -433,6 +429,7 @@ def process(self, only_new = False, lmetrics = None): ''' process_ok = True + self.postprocessing_updated = False if 'metrics' not in vars(self): std_out(f'Device {self.id} has nothing to process. 
Skipping', 'WARNING') @@ -456,13 +453,11 @@ def process(self, only_new = False, lmetrics = None): if 'from_list' in metrics[metric]: lazy_name = metrics[metric]['from_list'] else: - # TODO make this open lazy_name = f"scdata.device.process.{metrics[metric]['process']}" try: funct = LazyCallable(lazy_name) except ModuleNotFoundError: - #print_exc() process_ok &= False std_out('Problem adding lazy callable to metrics list', 'ERROR') pass @@ -474,26 +469,32 @@ def process(self, only_new = False, lmetrics = None): try: result = funct(self.data, *args, **kwargs) except KeyError: - # print_exc() - std_out('Metric args not in dataframe', 'ERROR') + std_out('Cannot process requested function with data provided', 'ERROR') process_ok = False pass else: if result is not None: self.data[metric] = result process_ok &= True - # If the metric is None, might be for many reasons and shouldn't collapse - # the process_ok + # If the metric is None, might be for many reasons and shouldn't collapse the process_ok if process_ok: - if self.source == 'api': - self.update_latest_postprocessing() std_out(f"Device {self.id} processed", "SUCCESS") - - self.processed = process_ok + self.processed = process_ok & self.update_postprocessing_date() return self.processed + def update_postprocessing_date(self): + + latest_postprocessing = localise_date(self.data.index[-1]+\ + to_timedelta(self.frequency), 'UTC') + if self.handler.update_latest_postprocessing(latest_postprocessing): + if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing: + self.postprocessing_updated = True + else: + self.postprocessing_updated = False + return self.postprocessing_updated + # TODO def checks(self, level): ''' @@ -508,43 +509,7 @@ def checks(self, level): return response - # TODO Check - def update_latest_postprocessing(self): - # Sets latest postprocessing to latest reading - - if self.source == 'api': - # TODO - # Deprecation - if self.source['handler'] == 'ScApiDevice': - if self.api_device.get_device_postprocessing() is not None: - std_out('Updating postprocessing') - # Add latest postprocessing rounded up with - # frequency so that we don't end up in - # and endless loop processing only the latest data line - # (minute vs. second precission of the data) - self.latest_postprocessing = localise_date(self.data.index[-1]+\ - to_timedelta(self.options['frequency']), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') - self.api_device.postprocessing['latest_postprocessing'] = self.latest_postprocessing - std_out(f"Updated latest_postprocessing to: {self.api_device.postprocessing['latest_postprocessing']}") - - return True - else: - if self.api_device.json.postprocessing.id is not None: - std_out('Updating postprocessing') - # Add latest postprocessing rounded up with - # frequency so that we don't end up in - # and endless loop processing only the latest data line - # (minute vs. second precission of the data) - self.latest_postprocessing = localise_date(self.data.index[-1]+\ - to_timedelta(self.options['frequency']), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') - self.api_device.json.postprocessing.latest_postprocessing = self.latest_postprocessing - std_out(f"Updated latest_postprocessing to: {self.api_device.json.postprocessing['latest_postprocessing']}") - - return True - - return False - - # TODO Check + # TODO Remove def forward(self, chunk_size = 500, dry_run = False, max_retries = 2): ''' Forwards data to another api. 
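The intent of the new update_postprocessing_date is easier to see in isolation: the last index entry is rounded up by one sampling period before being handed to the handler, so minute-level data stamped at second precision cannot trigger an endless reprocessing of the final data line. A standalone sketch with a hypothetical frame — scdata itself goes through localise_date and the handler rather than raw pandas:

    from pandas import DataFrame, date_range, to_timedelta

    data = DataFrame({'TEMP': [21.1, 21.3, 21.2]},
                     index=date_range('2023-12-04 10:00', periods=3,
                                      freq='1Min', tz='UTC'))
    frequency = '1Min'

    # One period past the last sample: the next run only fetches newer data.
    latest_postprocessing = data.index[-1] + to_timedelta(frequency)
    print(latest_postprocessing)  # 2023-12-04 10:03:00+00:00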
@@ -726,6 +691,9 @@ def update_postprocessing(self, dry_run = False): boolean True if posted ok, False otherwise ''' + if not self.postprocessing_updated: + std_out(f'Postprocessing is not up to date', 'ERROR') + return False post_ok = self.api_device.patch_postprocessing(dry_run=dry_run) From 53f9d127477f7d2f5c80346544c4927360a099fe Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 4 Dec 2023 10:44:18 +0100 Subject: [PATCH 07/72] Small functions for printing management --- scdata/utils/out.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/scdata/utils/out.py b/scdata/utils/out.py index db4ce8dc..99f71d32 100644 --- a/scdata/utils/out.py +++ b/scdata/utils/out.py @@ -1,6 +1,13 @@ from termcolor import colored from scdata._config import config from datetime import datetime +import sys + +def block_print(): + sys.stdout = open(os.devnull, 'w') + +def enable_print(): + sys.stdout = sys.__stdout__ def std_out(msg, mtype = None, force = False): out_level = config._out_level @@ -9,7 +16,7 @@ def std_out(msg, mtype = None, force = False): else: stamp = '' # Output levels: - # 'QUIET': nothing, + # 'QUIET': nothing, # 'NORMAL': warn, err # 'DEBUG': info, warn, err, success if force == True: priority = 2 @@ -17,11 +24,11 @@ def std_out(msg, mtype = None, force = False): elif out_level == 'NORMAL': priority = 1 elif out_level == 'DEBUG': priority = 2 - if mtype is None and priority>1: + if mtype is None and priority>1: print(f'[{stamp}] - ' + '[INFO] ' + msg) - elif mtype == 'SUCCESS' and priority>0: + elif mtype == 'SUCCESS' and priority>0: print(f'[{stamp}] - ' + colored('[SUCCESS] ', 'green') + msg) - elif mtype == 'WARNING' and priority>0: + elif mtype == 'WARNING' and priority>0: print(f'[{stamp}] - ' + colored('[WARNING] ', 'yellow') + msg) - elif mtype == 'ERROR' and priority>0: + elif mtype == 'ERROR' and priority>0: print(f'[{stamp}] - ' + colored('[ERROR] ', 'red') + msg) \ No newline at end of file From 5b323efaa5d7b3530774f5850e91748b272e97b5 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 4 Dec 2023 17:57:55 +0100 Subject: [PATCH 08/72] Move names to different model --- names/SCDevice.json | 836 +++++++++++++++++++++++++++++++++++++ names/sc_sensor_names.json | 836 ------------------------------------- 2 files changed, 836 insertions(+), 836 deletions(-) create mode 100644 names/SCDevice.json delete mode 100644 names/sc_sensor_names.json diff --git a/names/SCDevice.json b/names/SCDevice.json new file mode 100644 index 00000000..0735c715 --- /dev/null +++ b/names/SCDevice.json @@ -0,0 +1,836 @@ +[ + { + "name": "BATT", + "id": "10", + "description": "Battery", + "units": "%" + }, + { + "name": "BATT_VOLT", + "id": "0", + "description": "Battery voltage", + "units": "V" + }, + { + "name": "SDCARD", + "id": "0", + "description": "SDcard present", + "units": "Present" + }, + { + "name": "LIGHT", + "id": "14", + "description": "Light", + "units": "Lux" + }, + { + "name": "TEMP", + "id": "55", + "description": "Temperature", + "units": "C" + }, + { + "name": "HUM", + "id": "56", + "description": "Humidity", + "units": "%rh" + }, + { + "name": "NOISE_A", + "id": "53", + "description": "Noise dBA", + "units": "dBA" + }, + { + "name": "NOISE_B", + "id": "0", + "description": "Noise dBC", + "units": "dBC" + }, + { + "name": "NOISE_Z", + "id": "0", + "description": "Noise dBZ", + "units": "dB" + }, + { + "name": "NOISE_FFT", + "id": "0", + "description": "Noise FFT", + "units": " " + }, + { + "name": "ALT", + "id": "0", + "description": "Altitude", + 
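For the out.py changes above, the observable behaviour per output level can be summarised with a short usage sketch. The message strings are illustrative; the level semantics follow the priority table in the patched std_out ('QUIET' is intended to print nothing, 'NORMAL' passes SUCCESS/WARNING/ERROR, 'DEBUG' also passes plain INFO):

    from scdata._config import config
    from scdata.utils.out import std_out

    config._out_level = 'NORMAL'
    std_out('loading device')                   # INFO: only shown under 'DEBUG'
    std_out('device processed', 'SUCCESS')      # shown under 'NORMAL' and 'DEBUG'
    std_out('sensor missing', 'WARNING')        # shown under 'NORMAL' and 'DEBUG'
    std_out('forced info message', force=True)  # always shown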
"units": "M" + }, + { + "name": "PRESS", + "id": "58", + "description": "Barometric pressure", + "units": "kPa" + }, + { + "name": "PRESS_TEMP", + "id": "0", + "description": "Pressure internal temperature", + "units": "C" + }, + { + "name": "CCS811_VOCS", + "id": "113", + "description": "VOC Gas CCS811", + "units": "ppb" + }, + { + "name": "CCS811_ECO2", + "id": "112", + "description": "eCO2 Gas CCS811", + "units": "ppm" + }, + { + "name": "PMS5003_PM_1", + "id": "89", + "description": "PM 1.0", + "units": "ug/m3" + }, + { + "name": "PMS5003_PM_25", + "id": "87", + "description": "PM 2.5", + "units": "ug/m3" + }, + { + "name": "PMS5003_PM_10", + "id": "88", + "description": "PM 10.0", + "units": "ug/m3" + }, + { + "name": "PMS5003_PN_03", + "id": "165", + "description": "PN 0.3", + "units": "#/0.1l" + }, + { + "name": "PMS5003_PN_05", + "id": "166", + "description": "PN 0.5", + "units": "#/0.1l" + }, + { + "name": "PMS5003_PN_1", + "id": "167", + "description": "PN 1.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_PN_25", + "id": "168", + "description": "PN 2.5", + "units": "#/0.1l" + }, + { + "name": "PMS5003_PN_5", + "id": "169", + "description": "PN 5.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_PN_10", + "id": "170", + "description": "PN 10.0", + "units": "#/0.1l" + }, + { + "name": "GB_1A", + "id": "65", + "description": "Gases Board 1A", + "units": "mV" + }, + { + "name": "GB_1W", + "id": "64", + "description": "Gases Board 1W", + "units": "mV" + }, + { + "name": "GB_2A", + "id": "62", + "description": "Gases Board 2A", + "units": "mV" + }, + { + "name": "GB_2W", + "id": "61", + "description": "Gases Board 2W", + "units": "mV" + }, + { + "name": "GB_3A", + "id": "68", + "description": "Gases Board 3A", + "units": "mV" + }, + { + "name": "GB_3W", + "id": "67", + "description": "Gases Board 3W", + "units": "mV" + }, + { + "name": "GB_TEMP", + "id": "79", + "description": "Gases Board Temperature", + "units": "C" + }, + { + "name": "GB_HUM", + "id": "80", + "description": "Gases Board Humidity", + "units": "%" + }, + { + "name": "GR_ADC", + "id": "25", + "description": "Groove ADC", + "units": "V" + }, + { + "name": "INA_VBUS", + "id": "0", + "description": "INA219 Bus voltage", + "units": "V" + }, + { + "name": "INA_VSHUNT", + "id": "0", + "description": "INA219 Shunt voltage", + "units": "mV" + }, + { + "name": "INA_CURR", + "id": "0", + "description": "INA219 Current", + "units": "mA" + }, + { + "name": "INA_VLOAD", + "id": "0", + "description": "INA219 Load voltage", + "units": "V" + }, + { + "name": "DS_WAT_TEMP", + "id": "42", + "description": "DS18B20 Water temperature", + "units": "C" + }, + { + "name": "AS_TEMP", + "id": "51", + "description": "Atlas Temperature", + "units": "C" + }, + { + "name": "AS_PH", + "id": "43", + "description": "Atlas PH", + "units": "pH" + }, + { + "name": "AS_COND", + "id": "45", + "description": "Atlas Conductivity", + "units": "uS/cm" + }, + { + "name": "AS_TDS", + "id": "122", + "description": "Atlas Total Dissolved Solids", + "units": "ppm" + }, + { + "name": "AS_SAL", + "id": "123", + "description": "Atlas Salinity", + "units": "PSU(ppt)" + }, + { + "name": "AS_SG", + "id": "46", + "description": "Atlas Specific gravity", + "units": "" + }, + { + "name": "AS_DO", + "id": "48", + "description": "Atlas Dissolved Oxygen", + "units": "mg/L" + }, + { + "name": "AS_DO_SAT", + "id": "49", + "description": "Atlas DO Saturation", + "units": "%" + }, + { + "name": "AS_ORP", + "id": "164", + "description": "Atlas Redox potential", + "units": 
"mV" + }, + { + "name": "CHRP_MOIS_RAW", + "id": "0", + "description": "Soil Moisture Raw", + "units": "" + }, + { + "name": "CHRP_MOIS", + "id": "50", + "description": "Soil Moisture Percent", + "units": "%" + }, + { + "name": "CHRP_TEMP", + "id": "0", + "description": "Soil Temperature", + "units": "C" + }, + { + "name": "CHRP_LIGHT", + "id": "0", + "description": "Soil Light", + "units": "" + }, + { + "name": "PMS5003_EXT_PM_A_1", + "id": "71", + "description": "Ext PM_A 1.0", + "units": "ug/m3" + }, + { + "name": "PMS5003_EXT_PM_A_25", + "id": "72", + "description": "Ext PM_A 2.5", + "units": "ug/m3" + }, + { + "name": "PMS5003_EXT_PM_A_10", + "id": "73", + "description": "Ext PM_A 10.0", + "units": "ug/m3" + }, + { + "name": "PMS5003_EXT_PN_A_03", + "id": "99", + "description": "Ext PN_A 0.3", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_A_05", + "id": "100", + "description": "Ext PN_A 0.5", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_A_1", + "id": "101", + "description": "Ext PN_A 1.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_A_25", + "id": "102", + "description": "Ext PN_A 2.5", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_A_5", + "id": "103", + "description": "Ext PN_A 5.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_A_10", + "id": "104", + "description": "Ext PN_A 10.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PM_B_1", + "id": "75", + "description": "Ext PM_B 1.0", + "units": "ug/m3" + }, + { + "name": "PMS5003_EXT_PM_B_25", + "id": "76", + "description": "Ext PM_B 2.5", + "units": "ug/m3" + }, + { + "name": "PMS5003_EXT_PM_B_10", + "id": "77", + "description": "Ext PM_B 10.0", + "units": "ug/m3" + }, + { + "name": "PMS5003_EXT_PN_B_03", + "id": "105", + "description": "Ext PN_B 0.3", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_B_05", + "id": "106", + "description": "Ext PN_B 0.5", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_B_1", + "id": "107", + "description": "Ext PN_B 1.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_B_25", + "id": "108", + "description": "Ext PN_B 2.5", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_B_5", + "id": "109", + "description": "Ext PN_B 5.0", + "units": "#/0.1l" + }, + { + "name": "PMS5003_EXT_PN_B_10", + "id": "110", + "description": "Ext PN_B 10.0", + "units": "#/0.1l" + }, + { + "name": "PM_DALLAS_TEMP", + "id": "96", + "description": "PM board Dallas Temperature", + "units": "C" + }, + { + "name": "DALLAS_TEMP", + "id": "96", + "description": "Direct Dallas Temperature", + "units": "C" + }, + { + "name": "SHT31_EXT_TEMP", + "id": "79", + "description": "Ext SHT31 Temperature", + "units": "C" + }, + { + "name": "SHT31_EXT_HUM", + "id": "80", + "description": "Ext SHT31 Humidity", + "units": "%" + }, + { + "name": "SHT35_EXT_TEMP", + "id": "0", + "description": "Ext SHT35 Temperature", + "units": "C" + }, + { + "name": "SHT35_EXT_HUM", + "id": "0", + "description": "Ext SHT35 Humidity", + "units": "%" + }, + { + "name": "EXT_RANGE_LIGHT", + "id": "0", + "description": "Ext Range Light", + "units": "Lux" + }, + { + "name": "EXT_RANGE_DIST", + "id": "98", + "description": "Ext Range Distance", + "units": "mm" + }, + { + "name": "BME680_TEMP", + "id": "0", + "description": "Temperature BME680", + "units": "C" + }, + { + "name": "BME680_HUM", + "id": "0", + "description": "Humidity BME680", + "units": "%" + }, + { + "name": "BME680_PRESS", + "id": "0", + "description": "Barometric pressure BME680", + "units": "kPa" + }, + { + "name": 
"BME680_VOCS", + "id": "0", + "description": "VOC Gas BME680", + "units": "Ohms" + }, + { + "name": "GPS_FIX", + "id": "128", + "description": "GPS Fix Quality", + "units": "" + }, + { + "name": "GPS_LAT", + "id": "125", + "description": "GPS Latitude", + "units": "Deg" + }, + { + "name": "GPS_LONG", + "id": "126", + "description": "GPS Longitude", + "units": "Deg" + }, + { + "name": "GPS_ALT", + "id": "127", + "description": "GPS Altitude", + "units": "m" + }, + { + "name": "GPS_SPEED", + "id": "129", + "description": "GPS Speed", + "units": "m/s" + }, + { + "name": "GPS_HDOP", + "id": "131", + "description": "GPS Horizontal Dilution of Position", + "units": "" + }, + { + "name": "GPS_SATNUM", + "id": "130", + "description": "GPS Traked Satellites", + "units": "" + }, + { + "name": "ADC_48_0", + "id": "133", + "description": "ADS1x15 ADC 0x48 Ch0", + "units": "V" + }, + { + "name": "ADC_48_1", + "id": "134", + "description": "ADS1x15 ADC 0x48 Ch1", + "units": "V" + }, + { + "name": "ADC_48_2", + "id": "135", + "description": "ADS1x15 ADC 0x48 Ch2", + "units": "V" + }, + { + "name": "ADC_48_3", + "id": "136", + "description": "ADS1x15 ADC 0x48 Ch3", + "units": "V" + }, + { + "name": "ADC_49_0", + "id": "138", + "description": "ADS1x15 ADC 0x49 Ch0", + "units": "V" + }, + { + "name": "ADC_49_1", + "id": "139", + "description": "ADS1x15 ADC 0x49 Ch1", + "units": "V" + }, + { + "name": "ADC_49_2", + "id": "140", + "description": "ADS1x15 ADC 0x49 Ch2", + "units": "V" + }, + { + "name": "ADC_49_3", + "id": "141", + "description": "ADS1x15 ADC 0x49 Ch3", + "units": "V" + }, + { + "name": "ADC_4A_0", + "id": "143", + "description": "ADS1x15 ADC 0x4A Ch0", + "units": "V" + }, + { + "name": "ADC_4A_1", + "id": "144", + "description": "ADS1x15 ADC 0x4A Ch1", + "units": "V" + }, + { + "name": "ADC_4A_2", + "id": "145", + "description": "ADS1x15 ADC 0x4A Ch2", + "units": "V" + }, + { + "name": "ADC_4A_3", + "id": "146", + "description": "ADS1x15 ADC 0x4A Ch3", + "units": "V" + }, + { + "name": "ADC_4B_0", + "id": "148", + "description": "ADS1x15 ADC 0x4B Ch0", + "units": "V" + }, + { + "name": "ADC_4B_1", + "id": "149", + "description": "ADS1x15 ADC 0x4B Ch1", + "units": "V" + }, + { + "name": "ADC_4B_2", + "id": "150", + "description": "ADS1x15 ADC 0x4B Ch2", + "units": "V" + }, + { + "name": "ADC_4B_3", + "id": "151", + "description": "ADS1x15 ADC 0x4B Ch3", + "units": "V" + }, + { + "name": "SCD30_CO2", + "id": "158", + "description": "SCD30 CO2", + "units": "ppm" + }, + { + "name": "SCD30_TEMP", + "id": "160", + "description": "SCD30 Temperature", + "units": "C" + }, + { + "name": "SCD30_HUM", + "id": "161", + "description": "SCD30 Humidity", + "units": "%" + }, + { + "name": "SFA30_HCHO", + "id": "212", + "description": "SFA30 HCHO", + "units": "ppb" + }, + { + "name": "SFA30_TEMP", + "id": "211", + "description": "SFA30 Temperature", + "units": "C" + }, + { + "name": "SFA30_HUM", + "id": "210", + "description": "SFA30 Humidity", + "units": "%" + }, + { + "name": "RSSI", + "id": "220", + "description": "Wi-Fi RSSI", + "units": "dBm" + }, + { + "name": "SD-card", + "id": "221", + "description": "SD card presence", + "units": "" + }, + { + "name": "GR_OLED", + "id": "0", + "description": "Groove OLED", + "units": "" + }, + { + "name": ,"SPS30_PM_1" + "id": "182", + "description": "PM1 measurement from SPS30", + "units": "ug/m3" + }, + { + "name": ,"SPS30_PM_10" + "id": "185", + "description": "PM10 measurement from SPS30", + "units": "ug/m3" + }, + { + "name": ,"SPS30_PM_25" + "id": "183", + 
"description": "PM2.5 measurement from SPS30", + "units": "ug/m3" + }, + { + "name": ,"SPS30_PM_40" + "id": "184", + "description": "PM4.0 measurement from SPS30", + "units": "ug/m3" + }, + { + "name": ,"SPS30_PN_05" + "id": "186", + "description": "PN0.5 measurement from SPS30", + "units": "#/0.1l" + }, + { + "name": ,"SPS30_PN_1" + "id": "187", + "description": "PN1.0 measurement from SPS30", + "units": "#/0.1l" + }, + { + "name": ,"SPS30_PN_25" + "id": "188", + "description": "PN2.5 measurement from SPS30", + "units": "#/0.1l" + }, + { + "name": ,"SPS30_PN_40" + "id": "189", + "description": "PN4.0 measurement from SPS30", + "units": "#/0.1l" + }, + { + "name": ,"SPS30_PN_10" + "id": "190", + "description": "PN10.0 measurement from SPS30", + "units": "#/0.1l" + }, + { + "name": ,"SPS30_TPS" + "id": "191", + "description": "Typical Particle Size from SPS30", + "units": "um" + }, + { + "name": ,"SEN5X_PM_1" + "id": "193", + "description": "PM1 measurement from SEN5X", + "units": "ug/m3" + }, + { + "name": ,"SEN5X_PM_10" + "id": "196", + "description": "PM10 measurement from SEN5X", + "units": "ug/m3" + }, + { + "name": ,"SEN5X_PM_25" + "id": "194", + "description": "PM2.5 measurement from SEN5X", + "units": "ug/m3" + }, + { + "name": ,"SEN5X_PM_40" + "id": "195", + "description": "PM4.0 measurement from SEN5X", + "units": "ug/m3" + }, + { + "name": ,"SEN5X_PN_05" + "id": "197", + "description": "PN0.5 measurement from SEN5X", + "units": "#/0.1l" + }, + { + "name": ,"SEN5X_PN_1" + "id": "198", + "description": "PN1.0 measurement from SEN5X", + "units": "#/0.1l" + }, + { + "name": ,"SEN5X_PN_25" + "id": "199", + "description": "PN2.5 measurement from SEN5X", + "units": "#/0.1l" + }, + { + "name": ,"SEN5X_PN_40" + "id": "200", + "description": "PN4.0 measurement from SEN5X", + "units": "#/0.1l" + }, + { + "name": ,"SEN5X_PN_10" + "id": "201", + "description": "PN10.0 measurement from SEN5X", + "units": "#/0.1l" + }, + { + "name": ,"SEN5X_TPS" + "id": "202", + "description": "Typical Particle Size from SEN5X", + "units": "um" + }, + { + "name": "SEN5X_TEMP", + "id": "204", + "description": "SEN5X Temperature", + "units": "C" + }, + { + "name": "SEN5X_HUM", + "id": "203", + "description": "SEN5X Humidity", + "units": "%rh" + }, + { + "name": "SEN5X_VOCS_IDX", + "id": "205", + "description": "SEN5X VOCs Index", + "units": "" + }, + { + "name": "SEN5X_NOX_IDX", + "id": "206", + "description": "SEN5X NOX Index", + "units": "" + }, + { + "name": "SEN5X_VOCS_RAW", + "id": "207", + "description": "SEN5X VOCs raw", + "units": "" + }, + { + "name": "SEN5X_NOX_RAW", + "id": "208", + "description": "SEN5X NOX raw", + "units": "" + } +] \ No newline at end of file diff --git a/names/sc_sensor_names.json b/names/sc_sensor_names.json deleted file mode 100644 index 503daa29..00000000 --- a/names/sc_sensor_names.json +++ /dev/null @@ -1,836 +0,0 @@ -{ - "BATT": - { - "id": "10", - "desc": "Battery", - "units": "%" - }, - "BATT_VOLT": - { - "id": "0", - "desc": "Battery voltage", - "units": "V" - }, - "SDCARD": - { - "id": "0", - "desc": "SDcard present", - "units": "Present" - }, - "LIGHT": - { - "id": "14", - "desc": "Light", - "units": "Lux" - }, - "TEMP": - { - "id": "55", - "desc": "Temperature", - "units": "C" - }, - "HUM": - { - "id": "56", - "desc": "Humidity", - "units": "%rh" - }, - "NOISE_A": - { - "id": "53", - "desc": "Noise dBA", - "units": "dBA" - }, - "NOISE_B": - { - "id": "0", - "desc": "Noise dBC", - "units": "dBC" - }, - "NOISE_Z": - { - "id": "0", - "desc": "Noise dBZ", - "units": "dB" - 
}, - "NOISE_FFT": - { - "id": "0", - "desc": "Noise FFT", - "units": " " - }, - "ALT": - { - "id": "0", - "desc": "Altitude", - "units": "M" - }, - "PRESS": - { - "id": "58", - "desc": "Barometric pressure", - "units": "kPa" - }, - "PRESS_TEMP": - { - "id": "0", - "desc": "Pressure internal temperature", - "units": "C" - }, - "CCS811_VOCS": - { - "id": "113", - "desc": "VOC Gas CCS811", - "units": "ppb" - }, - "CCS811_ECO2": - { - "id": "112", - "desc": "eCO2 Gas CCS811", - "units": "ppm" - }, - "PMS5003_PM_1": - { - "id": "89", - "desc": "PM 1.0", - "units": "ug/m3" - }, - "PMS5003_PM_25": - { - "id": "87", - "desc": "PM 2.5", - "units": "ug/m3" - }, - "PMS5003_PM_10": - { - "id": "88", - "desc": "PM 10.0", - "units": "ug/m3" - }, - "PMS5003_PN_03": - { - "id": "165", - "desc": "PN 0.3", - "units": "#/0.1l" - }, - "PMS5003_PN_05": - { - "id": "166", - "desc": "PN 0.5", - "units": "#/0.1l" - }, - "PMS5003_PN_1": - { - "id": "167", - "desc": "PN 1.0", - "units": "#/0.1l" - }, - "PMS5003_PN_25": - { - "id": "168", - "desc": "PN 2.5", - "units": "#/0.1l" - }, - "PMS5003_PN_5": - { - "id": "169", - "desc": "PN 5.0", - "units": "#/0.1l" - }, - "PMS5003_PN_10": - { - "id": "170", - "desc": "PN 10.0", - "units": "#/0.1l" - }, - "GB_1A": - { - "id": "65", - "desc": "Gases Board 1A", - "units": "mV" - }, - "GB_1W": - { - "id": "64", - "desc": "Gases Board 1W", - "units": "mV" - }, - "GB_2A": - { - "id": "62", - "desc": "Gases Board 2A", - "units": "mV" - }, - "GB_2W": - { - "id": "61", - "desc": "Gases Board 2W", - "units": "mV" - }, - "GB_3A": - { - "id": "68", - "desc": "Gases Board 3A", - "units": "mV" - }, - "GB_3W": - { - "id": "67", - "desc": "Gases Board 3W", - "units": "mV" - }, - "GB_TEMP": - { - "id": "79", - "desc": "Gases Board Temperature", - "units": "C" - }, - "GB_HUM": - { - "id": "80", - "desc": "Gases Board Humidity", - "units": "%" - }, - "GR_ADC": - { - "id": "25", - "desc": "Groove ADC", - "units": "V" - }, - "INA_VBUS": - { - "id": "0", - "desc": "INA219 Bus voltage", - "units": "V" - }, - "INA_VSHUNT": - { - "id": "0", - "desc": "INA219 Shunt voltage", - "units": "mV" - }, - "INA_CURR": - { - "id": "0", - "desc": "INA219 Current", - "units": "mA" - }, - "INA_VLOAD": - { - "id": "0", - "desc": "INA219 Load voltage", - "units": "V" - }, - "DS_WAT_TEMP": - { - "id": "42", - "desc": "DS18B20 Water temperature", - "units": "C" - }, - "AS_TEMP": - { - "id": "51", - "desc": "Atlas Temperature", - "units": "C" - }, - "AS_PH": - { - "id": "43", - "desc": "Atlas PH", - "units": "pH" - }, - "AS_COND": - { - "id": "45", - "desc": "Atlas Conductivity", - "units": "uS/cm" - }, - "AS_TDS": - { - "id": "122", - "desc": "Atlas Total Dissolved Solids", - "units": "ppm" - }, - "AS_SAL": - { - "id": "123", - "desc": "Atlas Salinity", - "units": "PSU(ppt)" - }, - "AS_SG": - { - "id": "46", - "desc": "Atlas Specific gravity", - "units": "" - }, - "AS_DO": - { - "id": "48", - "desc": "Atlas Dissolved Oxygen", - "units": "mg/L" - }, - "AS_DO_SAT": - { - "id": "49", - "desc": "Atlas DO Saturation", - "units": "%" - }, - "AS_ORP": - { - "id": "164", - "desc": "Atlas Redox potential", - "units": "mV" - }, - "CHRP_MOIS_RAW": - { - "id": "0", - "desc": "Soil Moisture Raw", - "units": "" - }, - "CHRP_MOIS": - { - "id": "50", - "desc": "Soil Moisture Percent", - "units": "%" - }, - "CHRP_TEMP": - { - "id": "0", - "desc": "Soil Temperature", - "units": "C" - }, - "CHRP_LIGHT": - { - "id": "0", - "desc": "Soil Light", - "units": "" - }, - "PMS5003_EXT_PM_A_1": - { - "id": "71", - "desc": "Ext PM_A 1.0", - 
"units": "ug/m3" - }, - "PMS5003_EXT_PM_A_25": - { - "id": "72", - "desc": "Ext PM_A 2.5", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_A_10": - { - "id": "73", - "desc": "Ext PM_A 10.0", - "units": "ug/m3" - }, - "PMS5003_EXT_PN_A_03": - { - "id": "99", - "desc": "Ext PN_A 0.3", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_A_05": - { - "id": "100", - "desc": "Ext PN_A 0.5", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_A_1": - { - "id": "101", - "desc": "Ext PN_A 1.0", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_A_25": - { - "id": "102", - "desc": "Ext PN_A 2.5", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_A_5": - { - "id": "103", - "desc": "Ext PN_A 5.0", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_A_10": - { - "id": "104", - "desc": "Ext PN_A 10.0", - "units": "#/0.1l" - }, - "PMS5003_EXT_PM_B_1": - { - "id": "75", - "desc": "Ext PM_B 1.0", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_B_25": - { - "id": "76", - "desc": "Ext PM_B 2.5", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_B_10": - { - "id": "77", - "desc": "Ext PM_B 10.0", - "units": "ug/m3" - }, - "PMS5003_EXT_PN_B_03": - { - "id": "105", - "desc": "Ext PN_B 0.3", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_B_05": - { - "id": "106", - "desc": "Ext PN_B 0.5", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_B_1": - { - "id": "107", - "desc": "Ext PN_B 1.0", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_B_25": - { - "id": "108", - "desc": "Ext PN_B 2.5", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_B_5": - { - "id": "109", - "desc": "Ext PN_B 5.0", - "units": "#/0.1l" - }, - "PMS5003_EXT_PN_B_10": - { - "id": "110", - "desc": "Ext PN_B 10.0", - "units": "#/0.1l" - }, - "PM_DALLAS_TEMP": - { - "id": "96", - "desc": "PM board Dallas Temperature", - "units": "C" - }, - "DALLAS_TEMP": - { - "id": "96", - "desc": "Direct Dallas Temperature", - "units": "C" - }, - "SHT31_EXT_TEMP": - { - "id": "79", - "desc": "Ext SHT31 Temperature", - "units": "C" - }, - "SHT31_EXT_HUM": - { - "id": "80", - "desc": "Ext SHT31 Humidity", - "units": "%" - }, - "SHT35_EXT_TEMP": - { - "id": "0", - "desc": "Ext SHT35 Temperature", - "units": "C" - }, - "SHT35_EXT_HUM": - { - "id": "0", - "desc": "Ext SHT35 Humidity", - "units": "%" - }, - "EXT_RANGE_LIGHT": - { - "id": "0", - "desc": "Ext Range Light", - "units": "Lux" - }, - "EXT_RANGE_DIST": - { - "id": "98", - "desc": "Ext Range Distance", - "units": "mm" - }, - "BME680_TEMP": - { - "id": "0", - "desc": "Temperature BME680", - "units": "C" - }, - "BME680_HUM": - { - "id": "0", - "desc": "Humidity BME680", - "units": "%" - }, - "BME680_PRESS": - { - "id": "0", - "desc": "Barometric pressure BME680", - "units": "kPa" - }, - "BME680_VOCS": - { - "id": "0", - "desc": "VOC Gas BME680", - "units": "Ohms" - }, - "GPS_FIX": - { - "id": "128", - "desc": "GPS Fix Quality", - "units": "" - }, - "GPS_LAT": - { - "id": "125", - "desc": "GPS Latitude", - "units": "Deg" - }, - "GPS_LONG": - { - "id": "126", - "desc": "GPS Longitude", - "units": "Deg" - }, - "GPS_ALT": - { - "id": "127", - "desc": "GPS Altitude", - "units": "m" - }, - "GPS_SPEED": - { - "id": "129", - "desc": "GPS Speed", - "units": "m/s" - }, - "GPS_HDOP": - { - "id": "131", - "desc": "GPS Horizontal Dilution of Position", - "units": "" - }, - "GPS_SATNUM": - { - "id": "130", - "desc": "GPS Traked Satellites", - "units": "" - }, - "ADC_48_0": - { - "id": "133", - "desc": "ADS1x15 ADC 0x48 Ch0", - "units": "V" - }, - "ADC_48_1": - { - "id": "134", - "desc": "ADS1x15 ADC 0x48 Ch1", - "units": "V" - }, - "ADC_48_2": - { - "id": "135", - "desc": "ADS1x15 ADC 0x48 Ch2", - "units": "V" 
- }, - "ADC_48_3": - { - "id": "136", - "desc": "ADS1x15 ADC 0x48 Ch3", - "units": "V" - }, - "ADC_49_0": - { - "id": "138", - "desc": "ADS1x15 ADC 0x49 Ch0", - "units": "V" - }, - "ADC_49_1": - { - "id": "139", - "desc": "ADS1x15 ADC 0x49 Ch1", - "units": "V" - }, - "ADC_49_2": - { - "id": "140", - "desc": "ADS1x15 ADC 0x49 Ch2", - "units": "V" - }, - "ADC_49_3": - { - "id": "141", - "desc": "ADS1x15 ADC 0x49 Ch3", - "units": "V" - }, - "ADC_4A_0": - { - "id": "143", - "desc": "ADS1x15 ADC 0x4A Ch0", - "units": "V" - }, - "ADC_4A_1": - { - "id": "144", - "desc": "ADS1x15 ADC 0x4A Ch1", - "units": "V" - }, - "ADC_4A_2": - { - "id": "145", - "desc": "ADS1x15 ADC 0x4A Ch2", - "units": "V" - }, - "ADC_4A_3": - { - "id": "146", - "desc": "ADS1x15 ADC 0x4A Ch3", - "units": "V" - }, - "ADC_4B_0": - { - "id": "148", - "desc": "ADS1x15 ADC 0x4B Ch0", - "units": "V" - }, - "ADC_4B_1": - { - "id": "149", - "desc": "ADS1x15 ADC 0x4B Ch1", - "units": "V" - }, - "ADC_4B_2": - { - "id": "150", - "desc": "ADS1x15 ADC 0x4B Ch2", - "units": "V" - }, - "ADC_4B_3": - { - "id": "151", - "desc": "ADS1x15 ADC 0x4B Ch3", - "units": "V" - }, - "SCD30_CO2": - { - "id": "158", - "desc": "SCD30 CO2", - "units": "ppm" - }, - "SCD30_TEMP": - { - "id": "160", - "desc": "SCD30 Temperature", - "units": "C" - }, - "SCD30_HUM": - { - "id": "161", - "desc": "SCD30 Humidity", - "units": "%" - }, - "SFA30_HCHO": - { - "id": "212", - "desc": "SFA30 HCHO", - "units": "ppb" - }, - "SFA30_TEMP": - { - "id": "211", - "desc": "SFA30 Temperature", - "units": "C" - }, - "SFA30_HUM": - { - "id": "210", - "desc": "SFA30 Humidity", - "units": "%" - }, - "RSSI": - { - "id": "220", - "desc": "Wi-Fi RSSI", - "units": "dBm" - }, - "SD-card": - { - "id": "221", - "desc": "SD card presence", - "units": "" - }, - "GR_OLED": - { - "id": "0", - "desc": "Groove OLED", - "units": "" - }, - "SPS30_PM_1": - { - "id": "182", - "desc": "PM1 measurement from SPS30", - "units": "ug/m3" - }, - "SPS30_PM_10": - { - "id": "185", - "desc": "PM10 measurement from SPS30", - "units": "ug/m3" - }, - "SPS30_PM_25": - { - "id": "183", - "desc": "PM2.5 measurement from SPS30", - "units": "ug/m3" - }, - "SPS30_PM_40": - { - "id": "184", - "desc": "PM4.0 measurement from SPS30", - "units": "ug/m3" - }, - "SPS30_PN_05": - { - "id": "186", - "desc": "PN0.5 measurement from SPS30", - "units": "#/0.1l" - }, - "SPS30_PN_1": - { - "id": "187", - "desc": "PN1.0 measurement from SPS30", - "units": "#/0.1l" - }, - "SPS30_PN_25": - { - "id": "188", - "desc": "PN2.5 measurement from SPS30", - "units": "#/0.1l" - }, - "SPS30_PN_40": - { - "id": "189", - "desc": "PN4.0 measurement from SPS30", - "units": "#/0.1l" - }, - "SPS30_PN_10": - { - "id": "190", - "desc": "PN10.0 measurement from SPS30", - "units": "#/0.1l" - }, - "SPS30_TPS": - { - "id": "191", - "desc": "Typical Particle Size from SPS30", - "units": "um" - }, - "SEN5X_PM_1": - { - "id": "193", - "desc": "PM1 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PM_10": - { - "id": "196", - "desc": "PM10 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PM_25": - { - "id": "194", - "desc": "PM2.5 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PM_40": - { - "id": "195", - "desc": "PM4.0 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PN_05": - { - "id": "197", - "desc": "PN0.5 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_1": - { - "id": "198", - "desc": "PN1.0 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_25": - { - "id": "199", - "desc": "PN2.5 
measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_40": - { - "id": "200", - "desc": "PN4.0 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_10": - { - "id": "201", - "desc": "PN10.0 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_TPS": - { - "id": "202", - "desc": "Typical Particle Size from SEN5X", - "units": "um" - }, - "SEN5X_TEMP": - { - "id": "204", - "desc": "SEN5X Temperature", - "units": "C" - }, - "SEN5X_HUM": - { - "id": "203", - "desc": "SEN5X Humidity", - "units": "%rh" - }, - "SEN5X_VOCS_IDX": - { - "id": "205", - "desc": "SEN5X VOCs Index", - "units": "" - }, - "SEN5X_NOX_IDX": - { - "id": "206", - "desc": "SEN5X NOX Index", - "units": "" - }, - "SEN5X_VOCS_RAW": - { - "id": "207", - "desc": "SEN5X VOCs raw", - "units": "" - }, - "SEN5X_NOX_RAW": - { - "id": "208", - "desc": "SEN5X NOX raw", - "units": "" - } -} \ No newline at end of file From 3e96e85299be7253f2aacefcd9e45e020c560b46 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 4 Dec 2023 18:18:00 +0100 Subject: [PATCH 09/72] Fix names json --- names/SCDevice.json | 120 ++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/names/SCDevice.json b/names/SCDevice.json index 0735c715..cd25cc76 100644 --- a/names/SCDevice.json +++ b/names/SCDevice.json @@ -678,124 +678,124 @@ "units": "" }, { - "name": ,"SPS30_PM_1" + "name": "SPS30_PM_1", "id": "182", - "description": "PM1 measurement from SPS30", - "units": "ug/m3" + "description": "PM1 measurement from SPS30", + "units": "ug/m3" }, { - "name": ,"SPS30_PM_10" + "name": "SPS30_PM_10", "id": "185", - "description": "PM10 measurement from SPS30", - "units": "ug/m3" + "description": "PM10 measurement from SPS30", + "units": "ug/m3" }, { - "name": ,"SPS30_PM_25" + "name": "SPS30_PM_25", "id": "183", - "description": "PM2.5 measurement from SPS30", - "units": "ug/m3" + "description": "PM2.5 measurement from SPS30", + "units": "ug/m3" }, { - "name": ,"SPS30_PM_40" + "name": "SPS30_PM_40", "id": "184", - "description": "PM4.0 measurement from SPS30", - "units": "ug/m3" + "description": "PM4.0 measurement from SPS30", + "units": "ug/m3" }, { - "name": ,"SPS30_PN_05" + "name": "SPS30_PN_05", "id": "186", - "description": "PN0.5 measurement from SPS30", - "units": "#/0.1l" + "description": "PN0.5 measurement from SPS30", + "units": "#/0.1l" }, { - "name": ,"SPS30_PN_1" + "name": "SPS30_PN_1", "id": "187", - "description": "PN1.0 measurement from SPS30", - "units": "#/0.1l" + "description": "PN1.0 measurement from SPS30", + "units": "#/0.1l" }, { - "name": ,"SPS30_PN_25" + "name": "SPS30_PN_25", "id": "188", - "description": "PN2.5 measurement from SPS30", - "units": "#/0.1l" + "description": "PN2.5 measurement from SPS30", + "units": "#/0.1l" }, { - "name": ,"SPS30_PN_40" + "name": "SPS30_PN_40", "id": "189", - "description": "PN4.0 measurement from SPS30", - "units": "#/0.1l" + "description": "PN4.0 measurement from SPS30", + "units": "#/0.1l" }, { - "name": ,"SPS30_PN_10" + "name": "SPS30_PN_10", "id": "190", - "description": "PN10.0 measurement from SPS30", - "units": "#/0.1l" + "description": "PN10.0 measurement from SPS30", + "units": "#/0.1l" }, { - "name": ,"SPS30_TPS" + "name": "SPS30_TPS", "id": "191", - "description": "Typical Particle Size from SPS30", - "units": "um" + "description": "Typical Particle Size from SPS30", + "units": "um" }, { - "name": ,"SEN5X_PM_1" + "name": "SEN5X_PM_1", "id": "193", - "description": "PM1 measurement from SEN5X", - "units": "ug/m3" + "description": 
"PM1 measurement from SEN5X", + "units": "ug/m3" }, { - "name": ,"SEN5X_PM_10" + "name": "SEN5X_PM_10", "id": "196", - "description": "PM10 measurement from SEN5X", - "units": "ug/m3" + "description": "PM10 measurement from SEN5X", + "units": "ug/m3" }, { - "name": ,"SEN5X_PM_25" + "name": "SEN5X_PM_25", "id": "194", - "description": "PM2.5 measurement from SEN5X", - "units": "ug/m3" + "description": "PM2.5 measurement from SEN5X", + "units": "ug/m3" }, { - "name": ,"SEN5X_PM_40" + "name": "SEN5X_PM_40", "id": "195", - "description": "PM4.0 measurement from SEN5X", - "units": "ug/m3" + "description": "PM4.0 measurement from SEN5X", + "units": "ug/m3" }, { - "name": ,"SEN5X_PN_05" + "name": "SEN5X_PN_05", "id": "197", - "description": "PN0.5 measurement from SEN5X", - "units": "#/0.1l" + "description": "PN0.5 measurement from SEN5X", + "units": "#/0.1l" }, { - "name": ,"SEN5X_PN_1" + "name": "SEN5X_PN_1", "id": "198", - "description": "PN1.0 measurement from SEN5X", - "units": "#/0.1l" + "description": "PN1.0 measurement from SEN5X", + "units": "#/0.1l" }, { - "name": ,"SEN5X_PN_25" + "name": "SEN5X_PN_25", "id": "199", - "description": "PN2.5 measurement from SEN5X", - "units": "#/0.1l" + "description": "PN2.5 measurement from SEN5X", + "units": "#/0.1l" }, { - "name": ,"SEN5X_PN_40" + "name": "SEN5X_PN_40", "id": "200", - "description": "PN4.0 measurement from SEN5X", - "units": "#/0.1l" + "description": "PN4.0 measurement from SEN5X", + "units": "#/0.1l" }, { - "name": ,"SEN5X_PN_10" + "name": "SEN5X_PN_10", "id": "201", - "description": "PN10.0 measurement from SEN5X", - "units": "#/0.1l" + "description": "PN10.0 measurement from SEN5X", + "units": "#/0.1l" }, { - "name": ,"SEN5X_TPS" + "name": "SEN5X_TPS", "id": "202", - "description": "Typical Particle Size from SEN5X", - "units": "um" + "description": "Typical Particle Size from SEN5X", + "units": "um" }, { "name": "SEN5X_TEMP", From 7e791e916f3494f40209756d98597e07ea6b4cfe Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 18 Dec 2023 13:00:12 +0100 Subject: [PATCH 10/72] Fix names --- names/SCDevice.json | 284 ++++++++++++++++++++++---------------------- 1 file changed, 145 insertions(+), 139 deletions(-) diff --git a/names/SCDevice.json b/names/SCDevice.json index cd25cc76..299bd067 100644 --- a/names/SCDevice.json +++ b/names/SCDevice.json @@ -3,834 +3,840 @@ "name": "BATT", "id": "10", "description": "Battery", - "units": "%" + "unit": "%" }, { "name": "BATT_VOLT", "id": "0", "description": "Battery voltage", - "units": "V" + "unit": "V" }, { "name": "SDCARD", "id": "0", "description": "SDcard present", - "units": "Present" + "unit": "Present" }, { "name": "LIGHT", "id": "14", "description": "Light", - "units": "Lux" + "unit": "Lux" }, { "name": "TEMP", "id": "55", "description": "Temperature", - "units": "C" + "unit": "C" }, { "name": "HUM", "id": "56", "description": "Humidity", - "units": "%rh" + "unit": "%rh" }, { "name": "NOISE_A", "id": "53", "description": "Noise dBA", - "units": "dBA" + "unit": "dBA" }, { "name": "NOISE_B", "id": "0", "description": "Noise dBC", - "units": "dBC" + "unit": "dBC" }, { "name": "NOISE_Z", "id": "0", "description": "Noise dBZ", - "units": "dB" + "unit": "dB" }, { "name": "NOISE_FFT", "id": "0", "description": "Noise FFT", - "units": " " + "unit": " " }, { "name": "ALT", "id": "0", "description": "Altitude", - "units": "M" + "unit": "M" }, { "name": "PRESS", "id": "58", "description": "Barometric pressure", - "units": "kPa" + "unit": "kPa" }, { "name": "PRESS_TEMP", "id": "0", "description": 
"Pressure internal temperature", - "units": "C" + "unit": "C" }, { "name": "CCS811_VOCS", "id": "113", "description": "VOC Gas CCS811", - "units": "ppb" + "unit": "ppb" }, { "name": "CCS811_ECO2", "id": "112", "description": "eCO2 Gas CCS811", - "units": "ppm" + "unit": "ppm" }, { "name": "PMS5003_PM_1", "id": "89", "description": "PM 1.0", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_PM_25", "id": "87", "description": "PM 2.5", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_PM_10", "id": "88", "description": "PM 10.0", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_PN_03", "id": "165", "description": "PN 0.3", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_PN_05", "id": "166", "description": "PN 0.5", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_PN_1", "id": "167", "description": "PN 1.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_PN_25", "id": "168", "description": "PN 2.5", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_PN_5", "id": "169", "description": "PN 5.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_PN_10", "id": "170", "description": "PN 10.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "GB_1A", "id": "65", "description": "Gases Board 1A", - "units": "mV" + "unit": "mV" }, { "name": "GB_1W", "id": "64", "description": "Gases Board 1W", - "units": "mV" + "unit": "mV" }, { "name": "GB_2A", "id": "62", "description": "Gases Board 2A", - "units": "mV" + "unit": "mV" }, { "name": "GB_2W", "id": "61", "description": "Gases Board 2W", - "units": "mV" + "unit": "mV" }, { "name": "GB_3A", "id": "68", "description": "Gases Board 3A", - "units": "mV" + "unit": "mV" }, { "name": "GB_3W", "id": "67", "description": "Gases Board 3W", - "units": "mV" + "unit": "mV" }, { "name": "GB_TEMP", "id": "79", "description": "Gases Board Temperature", - "units": "C" + "unit": "C" }, { "name": "GB_HUM", "id": "80", "description": "Gases Board Humidity", - "units": "%" + "unit": "%" }, { "name": "GR_ADC", "id": "25", "description": "Groove ADC", - "units": "V" + "unit": "V" }, { "name": "INA_VBUS", "id": "0", "description": "INA219 Bus voltage", - "units": "V" + "unit": "V" }, { "name": "INA_VSHUNT", "id": "0", "description": "INA219 Shunt voltage", - "units": "mV" + "unit": "mV" }, { "name": "INA_CURR", "id": "0", "description": "INA219 Current", - "units": "mA" + "unit": "mA" }, { "name": "INA_VLOAD", "id": "0", "description": "INA219 Load voltage", - "units": "V" + "unit": "V" }, { "name": "DS_WAT_TEMP", "id": "42", "description": "DS18B20 Water temperature", - "units": "C" + "unit": "C" }, { "name": "AS_TEMP", "id": "51", "description": "Atlas Temperature", - "units": "C" + "unit": "C" }, { "name": "AS_PH", "id": "43", "description": "Atlas PH", - "units": "pH" + "unit": "pH" }, { "name": "AS_COND", "id": "45", "description": "Atlas Conductivity", - "units": "uS/cm" + "unit": "uS/cm" }, { "name": "AS_TDS", "id": "122", "description": "Atlas Total Dissolved Solids", - "units": "ppm" + "unit": "ppm" }, { "name": "AS_SAL", "id": "123", "description": "Atlas Salinity", - "units": "PSU(ppt)" + "unit": "PSU(ppt)" }, { "name": "AS_SG", "id": "46", "description": "Atlas Specific gravity", - "units": "" + "unit": "" }, { "name": "AS_DO", "id": "48", "description": "Atlas Dissolved Oxygen", - "units": "mg/L" + "unit": "mg/L" }, { "name": "AS_DO_SAT", "id": "49", "description": "Atlas DO Saturation", - "units": "%" + "unit": "%" }, { "name": "AS_ORP", "id": "164", "description": "Atlas 
Redox potential", - "units": "mV" + "unit": "mV" }, { "name": "CHRP_MOIS_RAW", "id": "0", "description": "Soil Moisture Raw", - "units": "" + "unit": "" }, { "name": "CHRP_MOIS", "id": "50", "description": "Soil Moisture Percent", - "units": "%" + "unit": "%" }, { "name": "CHRP_TEMP", "id": "0", "description": "Soil Temperature", - "units": "C" + "unit": "C" }, { "name": "CHRP_LIGHT", "id": "0", "description": "Soil Light", - "units": "" + "unit": "" }, { "name": "PMS5003_EXT_PM_A_1", "id": "71", "description": "Ext PM_A 1.0", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_EXT_PM_A_25", "id": "72", "description": "Ext PM_A 2.5", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_EXT_PM_A_10", "id": "73", "description": "Ext PM_A 10.0", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_EXT_PN_A_03", "id": "99", "description": "Ext PN_A 0.3", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_A_05", "id": "100", "description": "Ext PN_A 0.5", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_A_1", "id": "101", "description": "Ext PN_A 1.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_A_25", "id": "102", "description": "Ext PN_A 2.5", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_A_5", "id": "103", "description": "Ext PN_A 5.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_A_10", "id": "104", "description": "Ext PN_A 10.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PM_B_1", "id": "75", "description": "Ext PM_B 1.0", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_EXT_PM_B_25", "id": "76", "description": "Ext PM_B 2.5", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_EXT_PM_B_10", "id": "77", "description": "Ext PM_B 10.0", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "PMS5003_EXT_PN_B_03", "id": "105", "description": "Ext PN_B 0.3", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_B_05", "id": "106", "description": "Ext PN_B 0.5", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_B_1", "id": "107", "description": "Ext PN_B 1.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_B_25", "id": "108", "description": "Ext PN_B 2.5", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_B_5", "id": "109", "description": "Ext PN_B 5.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PMS5003_EXT_PN_B_10", "id": "110", "description": "Ext PN_B 10.0", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "PM_DALLAS_TEMP", "id": "96", "description": "PM board Dallas Temperature", - "units": "C" + "unit": "C" }, { "name": "DALLAS_TEMP", "id": "96", "description": "Direct Dallas Temperature", - "units": "C" + "unit": "C" }, { "name": "SHT31_EXT_TEMP", "id": "79", "description": "Ext SHT31 Temperature", - "units": "C" + "unit": "C" }, { "name": "SHT31_EXT_HUM", "id": "80", "description": "Ext SHT31 Humidity", - "units": "%" + "unit": "%" }, { "name": "SHT35_EXT_TEMP", "id": "0", "description": "Ext SHT35 Temperature", - "units": "C" + "unit": "C" }, { "name": "SHT35_EXT_HUM", "id": "0", "description": "Ext SHT35 Humidity", - "units": "%" + "unit": "%" }, { "name": "EXT_RANGE_LIGHT", "id": "0", "description": "Ext Range Light", - "units": "Lux" + "unit": "Lux" }, { "name": "EXT_RANGE_DIST", "id": "98", "description": "Ext Range Distance", - "units": "mm" + "unit": "mm" }, { "name": "BME680_TEMP", "id": "0", "description": "Temperature BME680", - "units": 
"C" + "unit": "C" }, { "name": "BME680_HUM", "id": "0", "description": "Humidity BME680", - "units": "%" + "unit": "%" }, { "name": "BME680_PRESS", "id": "0", "description": "Barometric pressure BME680", - "units": "kPa" + "unit": "kPa" }, { "name": "BME680_VOCS", "id": "0", "description": "VOC Gas BME680", - "units": "Ohms" + "unit": "Ohms" }, { "name": "GPS_FIX", "id": "128", "description": "GPS Fix Quality", - "units": "" + "unit": "" }, { "name": "GPS_LAT", "id": "125", "description": "GPS Latitude", - "units": "Deg" + "unit": "Deg" }, { "name": "GPS_LONG", "id": "126", "description": "GPS Longitude", - "units": "Deg" + "unit": "Deg" }, { "name": "GPS_ALT", "id": "127", "description": "GPS Altitude", - "units": "m" + "unit": "m" }, { "name": "GPS_SPEED", "id": "129", "description": "GPS Speed", - "units": "m/s" + "unit": "m/s" }, { "name": "GPS_HDOP", "id": "131", "description": "GPS Horizontal Dilution of Position", - "units": "" + "unit": "" }, { "name": "GPS_SATNUM", "id": "130", "description": "GPS Traked Satellites", - "units": "" + "unit": "" }, { "name": "ADC_48_0", "id": "133", "description": "ADS1x15 ADC 0x48 Ch0", - "units": "V" + "unit": "V" }, { "name": "ADC_48_1", "id": "134", "description": "ADS1x15 ADC 0x48 Ch1", - "units": "V" + "unit": "V" }, { "name": "ADC_48_2", "id": "135", "description": "ADS1x15 ADC 0x48 Ch2", - "units": "V" + "unit": "V" }, { "name": "ADC_48_3", "id": "136", "description": "ADS1x15 ADC 0x48 Ch3", - "units": "V" + "unit": "V" }, { "name": "ADC_49_0", "id": "138", "description": "ADS1x15 ADC 0x49 Ch0", - "units": "V" + "unit": "V" }, { "name": "ADC_49_1", "id": "139", "description": "ADS1x15 ADC 0x49 Ch1", - "units": "V" + "unit": "V" }, { "name": "ADC_49_2", "id": "140", "description": "ADS1x15 ADC 0x49 Ch2", - "units": "V" + "unit": "V" }, { "name": "ADC_49_3", "id": "141", "description": "ADS1x15 ADC 0x49 Ch3", - "units": "V" + "unit": "V" }, { "name": "ADC_4A_0", "id": "143", "description": "ADS1x15 ADC 0x4A Ch0", - "units": "V" + "unit": "V" }, { "name": "ADC_4A_1", "id": "144", "description": "ADS1x15 ADC 0x4A Ch1", - "units": "V" + "unit": "V" }, { "name": "ADC_4A_2", "id": "145", "description": "ADS1x15 ADC 0x4A Ch2", - "units": "V" + "unit": "V" }, { "name": "ADC_4A_3", "id": "146", "description": "ADS1x15 ADC 0x4A Ch3", - "units": "V" + "unit": "V" }, { "name": "ADC_4B_0", "id": "148", "description": "ADS1x15 ADC 0x4B Ch0", - "units": "V" + "unit": "V" }, { "name": "ADC_4B_1", "id": "149", "description": "ADS1x15 ADC 0x4B Ch1", - "units": "V" + "unit": "V" }, { "name": "ADC_4B_2", "id": "150", "description": "ADS1x15 ADC 0x4B Ch2", - "units": "V" + "unit": "V" }, { "name": "ADC_4B_3", "id": "151", "description": "ADS1x15 ADC 0x4B Ch3", - "units": "V" + "unit": "V" }, { "name": "SCD30_CO2", "id": "158", "description": "SCD30 CO2", - "units": "ppm" + "unit": "ppm" }, { "name": "SCD30_TEMP", "id": "160", "description": "SCD30 Temperature", - "units": "C" + "unit": "C" }, { "name": "SCD30_HUM", "id": "161", "description": "SCD30 Humidity", - "units": "%" + "unit": "%" }, { "name": "SFA30_HCHO", "id": "212", "description": "SFA30 HCHO", - "units": "ppb" + "unit": "ppb" }, { "name": "SFA30_TEMP", "id": "211", "description": "SFA30 Temperature", - "units": "C" + "unit": "C" }, { "name": "SFA30_HUM", "id": "210", "description": "SFA30 Humidity", - "units": "%" + "unit": "%" }, { "name": "RSSI", "id": "220", "description": "Wi-Fi RSSI", - "units": "dBm" + "unit": "dBm" }, { "name": "SD-card", "id": "221", "description": "SD card presence", - 
"units": "" + "unit": "" }, { "name": "GR_OLED", "id": "0", "description": "Groove OLED", - "units": "" + "unit": "" }, { "name": "SPS30_PM_1", "id": "182", "description": "PM1 measurement from SPS30", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SPS30_PM_10", "id": "185", "description": "PM10 measurement from SPS30", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SPS30_PM_25", "id": "183", "description": "PM2.5 measurement from SPS30", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SPS30_PM_40", "id": "184", "description": "PM4.0 measurement from SPS30", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SPS30_PN_05", "id": "186", "description": "PN0.5 measurement from SPS30", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SPS30_PN_1", "id": "187", "description": "PN1.0 measurement from SPS30", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SPS30_PN_25", "id": "188", "description": "PN2.5 measurement from SPS30", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SPS30_PN_40", "id": "189", "description": "PN4.0 measurement from SPS30", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SPS30_PN_10", "id": "190", "description": "PN10.0 measurement from SPS30", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SPS30_TPS", "id": "191", "description": "Typical Particle Size from SPS30", - "units": "um" + "unit": "um" }, { "name": "SEN5X_PM_1", "id": "193", "description": "PM1 measurement from SEN5X", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SEN5X_PM_10", "id": "196", "description": "PM10 measurement from SEN5X", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SEN5X_PM_25", "id": "194", "description": "PM2.5 measurement from SEN5X", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SEN5X_PM_40", "id": "195", "description": "PM4.0 measurement from SEN5X", - "units": "ug/m3" + "unit": "ug/m3" }, { "name": "SEN5X_PN_05", "id": "197", "description": "PN0.5 measurement from SEN5X", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SEN5X_PN_1", "id": "198", "description": "PN1.0 measurement from SEN5X", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SEN5X_PN_25", "id": "199", "description": "PN2.5 measurement from SEN5X", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SEN5X_PN_40", "id": "200", "description": "PN4.0 measurement from SEN5X", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SEN5X_PN_10", "id": "201", "description": "PN10.0 measurement from SEN5X", - "units": "#/0.1l" + "unit": "#/0.1l" }, { "name": "SEN5X_TPS", "id": "202", "description": "Typical Particle Size from SEN5X", - "units": "um" + "unit": "um" }, { "name": "SEN5X_TEMP", "id": "204", "description": "SEN5X Temperature", - "units": "C" + "unit": "C" }, { "name": "SEN5X_HUM", "id": "203", "description": "SEN5X Humidity", - "units": "%rh" + "unit": "%rh" }, { "name": "SEN5X_VOCS_IDX", "id": "205", "description": "SEN5X VOCs Index", - "units": "" + "unit": "" }, { "name": "SEN5X_NOX_IDX", "id": "206", "description": "SEN5X NOX Index", - "units": "" + "unit": "" }, { "name": "SEN5X_VOCS_RAW", "id": "207", "description": "SEN5X VOCs raw", - "units": "" + "unit": "" }, { "name": "SEN5X_NOX_RAW", "id": "208", "description": "SEN5X NOX raw", - "units": "" + "unit": "" + }, + { + "name": "VEML6070_UVA", + "id": "159", + "description": "VEML6070 UVA", + "unit": "" } ] \ No newline at end of file From 7e117a3c4ddb89db10c564085fcc0392641d29df Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 18 Dec 2023 18:52:51 +0100 Subject: [PATCH 11/72] Improvements on blueprint for 
consistency --- blueprints/sc_air.json | 242 ++++++++++++----------------------------- 1 file changed, 71 insertions(+), 171 deletions(-) diff --git a/blueprints/sc_air.json b/blueprints/sc_air.json index 0acffd5c..890868fb 100644 --- a/blueprints/sc_air.json +++ b/blueprints/sc_air.json @@ -9,106 +9,6 @@ "resample": false, "source": null, "metrics": [ - { - "name": "CCS811_ECO2_CLEAN", - "description": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "channel": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - { - "name":"CCS811_VOCS_CLEAN", - "description": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "channel": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - { - "name": "EXT_PM_10_CLEAN", - "description": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "channels": [ - "EXT_PM_A_10", - "EXT_PM_B_10" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": true, - "id": 88 - }, - { - "name":"EXT_PM_1_CLEAN", - "description": "PM1 calculated based on both PMS5003 PM1 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "channels": [ - "EXT_PM_A_1", - "EXT_PM_B_1" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": true, - "id": 89 - }, - { - "name": "EXT_PM_25_CLEAN", - "description": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "channels": [ - "EXT_PM_A_25", - "EXT_PM_B_25" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": true, - "id": 87 - }, { "name": "PT1000_POS", "description": "PT1000 raw value", @@ -117,8 +17,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "ASPT1000", @@ -130,8 +30,8 @@ "afe_id": null }, "post": false, - "process": "alphasense_pt1000", - "units": "degC" + "function": "alphasense_pt1000", + "unit": "degC" }, { "name": "EC_SENSOR_TEMP", @@ -141,8 +41,8 @@ "priority": "ASPT1000" }, "post": false, - "process": "ec_sensor_temp", - "units": "degC" + "function": "ec_sensor_temp", + "unit": "degC" }, { "name": "CO_WE", @@ -152,8 +52,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "CO_AE", @@ -163,8 +63,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "NO2_WE", @@ -174,8 +74,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "NO2_AE", @@ -185,8 +85,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "NO_WE", @@ -196,8 +96,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "NO_AE", @@ -207,8 +107,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + 
"unit": "V" }, { "name": "SO2_WE", @@ -218,8 +118,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "SO2_AE", @@ -229,8 +129,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "H2S_WE", @@ -240,8 +140,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "H2S_AE", @@ -251,8 +151,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "OX_WE", @@ -262,8 +162,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name": "OX_AE", @@ -273,8 +173,8 @@ "channel": null }, "post": false, - "process": "channel_names", - "units": "V" + "function": "channel_names", + "unit": "V" }, { "name":"CO", @@ -287,8 +187,8 @@ "we": null }, "post": true, - "process": "alphasense_803_04", - "units": "ppb" + "function": "alphasense_803_04", + "unit": "ppb" }, { "name":"NO2", @@ -301,65 +201,65 @@ "we": null }, "post": true, - "process": "alphasense_803_04", - "units": "ppb" + "function": "alphasense_803_04", + "unit": "ppb" }, { "name":"O3", "description": "Calculation of O3 based on AAN 803-04", "id": 157, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" }, { "name":"SO2", "description": "Calculation of SO2 based on AAN 803-04", "id": 155, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null, - "use_alternative": true - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null, + "use_alternative": true + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" }, { "name": "NO", "description": "Calculation of NO based on AAN 803-04", "id": 154, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" }, { "name": "H2S", "description": "Calculation of H2S based on AAN 803-04", "id": 156, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" } ], "sources": [ From 9ce7a90e13910327393b8c54b9804a4b33c69ebd Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 2 Jan 2024 09:55:27 +0100 Subject: [PATCH 12/72] Update blueprint model --- blueprints/sc_air.json | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/blueprints/sc_air.json b/blueprints/sc_air.json index 890868fb..954be2e5 100644 --- a/blueprints/sc_air.json +++ b/blueprints/sc_air.json @@ -1,12 +1,16 @@ { - "documentation": 
"https://docs.smartcitizen.me/", - "id": null, - "clean_na": null, - "timezone": null, - "frequency": null, - "max_date": null, - "min_date": null, - "resample": false, + "meta": { + "timezone": null, + "documentation": "https://docs.smartcitizen.me/" + }, + "params": { + "id": null, + "clean_na": null, + "frequency": null, + "max_date": null, + "min_date": null, + "resample": false + }, "source": null, "metrics": [ { From 0212f6c8fe3805c3dac3fe4058626f5f41052358 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 2 Jan 2024 10:18:33 +0100 Subject: [PATCH 13/72] Move over to separate blueprints per source --- blueprints/sc_air_api.json | 264 +++++++++++++++++++ blueprints/{sc_air.json => sc_air_file.json} | 19 +- 2 files changed, 265 insertions(+), 18 deletions(-) create mode 100644 blueprints/sc_air_api.json rename blueprints/{sc_air.json => sc_air_file.json} (95%) diff --git a/blueprints/sc_air_api.json b/blueprints/sc_air_api.json new file mode 100644 index 00000000..314f1be2 --- /dev/null +++ b/blueprints/sc_air_api.json @@ -0,0 +1,264 @@ +{ + "meta": { + "documentation": "https://docs.smartcitizen.me/" + }, + "metrics": [ + { + "name": "PT1000_POS", + "description": "PT1000 raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "ASPT1000", + "description": "PT1000 temperature calculation in AFE", + "id": null, + "kwargs": { + "pt1000minus": null, + "pt1000plus": null, + "afe_id": null + }, + "post": false, + "function": "alphasense_pt1000", + "unit": "degC" + }, + { + "name": "EC_SENSOR_TEMP", + "description": "Electrochemical sensor temperature", + "id": null, + "kwargs": { + "priority": "ASPT1000" + }, + "post": false, + "function": "ec_sensor_temp", + "unit": "degC" + }, + { + "name": "CO_WE", + "description": "CO working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "CO_AE", + "description": "CO auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "NO2_WE", + "description": "NO2 working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "NO2_AE", + "description": "NO2 auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "NO_WE", + "description": "NO working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "NO_AE", + "description": "NO auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "SO2_WE", + "description": "SO2 working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "SO2_AE", + "description": "SO2 auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "H2S_WE", + "description": "H2S working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + 
"name": "H2S_AE", + "description": "H2S auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "OX_WE", + "description": "OX working electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name": "OX_AE", + "description": "OX auxiliary electrode raw value", + "id": null, + "kwargs": { + "channel": null + }, + "post": false, + "function": "channel_names", + "unit": "V" + }, + { + "name":"CO", + "description": "Calculation of CO based on AAN 803-04", + "id": 152, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" + }, + { + "name":"NO2", + "description": "Calculation of NO2 based on AAN 803-04", + "id": 153, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" + }, + { + "name":"O3", + "description": "Calculation of O3 based on AAN 803-04", + "id": 157, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" + }, + { + "name":"SO2", + "description": "Calculation of SO2 based on AAN 803-04", + "id": 155, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null, + "use_alternative": true + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" + }, +{ + "name": "NO", + "description": "Calculation of NO based on AAN 803-04", + "id": 154, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" + }, + { + "name": "H2S", + "description": "Calculation of H2S based on AAN 803-04", + "id": 156, + "kwargs": { + "ae": null, + "alphasense_id": null, + "t": "EC_SENSOR_TEMP", + "we": null + }, + "post": true, + "function": "alphasense_803_04", + "unit": "ppb" + } + ], + "source":{ + "type": "api", + "handler": "SCDevice", + "module": "smartcitizen_connector" + } +} diff --git a/blueprints/sc_air.json b/blueprints/sc_air_file.json similarity index 95% rename from blueprints/sc_air.json rename to blueprints/sc_air_file.json index 954be2e5..b1831fb6 100644 --- a/blueprints/sc_air.json +++ b/blueprints/sc_air_file.json @@ -1,17 +1,7 @@ { "meta": { - "timezone": null, "documentation": "https://docs.smartcitizen.me/" }, - "params": { - "id": null, - "clean_na": null, - "frequency": null, - "max_date": null, - "min_date": null, - "resample": false - }, - "source": null, "metrics": [ { "name": "PT1000_POS", @@ -266,13 +256,7 @@ "unit": "ppb" } ], - "sources": [ - { - "type": "api", - "handler": "SCDevice", - "module": "smartcitizen_connector" - }, - { + "source": { "type": "csv", "module": "scdata.io.csv", "handler": "csv_handler", @@ -291,5 +275,4 @@ "raw-data-file": null } } - ] } From c1628121232d559c20d72309df076668869690d0 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 2 Jan 2024 10:21:34 +0100 Subject: [PATCH 14/72] Hardware test to api --- hardware/SCAS210099.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hardware/SCAS210099.json b/hardware/SCAS210099.json index 9f5bc5e3..095fcea3 100644 --- a/hardware/SCAS210099.json +++ b/hardware/SCAS210099.json @@ -1,5 +1,5 @@ { - "blueprint_url": 
"https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/blueprints/sc_air.json", + "blueprint_url": "https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/blueprints/sc_air_api.json", "description": "2PMS5003-2ELEC-AFE", "versions": [ { From 3e4e3f893199353db17f38d16c8f723a2eb1f6da Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 12 Feb 2024 19:52:12 +0100 Subject: [PATCH 15/72] Add water blueprint --- blueprints/sc_water_api.json | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 blueprints/sc_water_api.json diff --git a/blueprints/sc_water_api.json b/blueprints/sc_water_api.json new file mode 100644 index 00000000..eb6490ce --- /dev/null +++ b/blueprints/sc_water_api.json @@ -0,0 +1,11 @@ +{ + "meta": { + "documentation": "https://docs.smartcitizen.me/" + }, + "metrics": [], + "source":{ + "type": "api", + "handler": "SCDevice", + "module": "smartcitizen_connector" + } +} From aec14ecdbcf60cfe57b11c86c844319b3d8feefa Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 12 Feb 2024 19:52:48 +0100 Subject: [PATCH 16/72] Minor cosmetic fix --- blueprints/sc_air_api.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blueprints/sc_air_api.json b/blueprints/sc_air_api.json index 314f1be2..f5fede90 100644 --- a/blueprints/sc_air_api.json +++ b/blueprints/sc_air_api.json @@ -227,7 +227,7 @@ "function": "alphasense_803_04", "unit": "ppb" }, -{ + { "name": "NO", "description": "Calculation of NO based on AAN 803-04", "id": 154, From 0f0659361e21218fb595b336e07e3df25a086623 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 20 Feb 2024 16:55:20 +0100 Subject: [PATCH 17/72] Update gitignore --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index dc0a0ebf..70b57cc1 100644 --- a/.gitignore +++ b/.gitignore @@ -155,4 +155,6 @@ ENV/ tests/local -scripts/ \ No newline at end of file +scripts/ + +.vscode \ No newline at end of file From b508a9d17230cba6edf1e72dc4237d69fbda3c23 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:03:38 +0200 Subject: [PATCH 18/72] Add workflow for publish to pypi --- .github/workflows/python-publish.yml | 39 ++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/python-publish.yml diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 00000000..bdaab28a --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,39 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
+
+
+name: Upload Python Package
+
+on:
+  release:
+    types: [published]
+
+permissions:
+  contents: read
+
+jobs:
+  deploy:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python
+      uses: actions/setup-python@v3
+      with:
+        python-version: '3.x'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install build
+    - name: Build package
+      run: python -m build
+    - name: Publish package
+      uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+      with:
+        user: __token__
+        password: ${{ secrets.PYPI_API_TOKEN }}

From 448703ee2c221d7bebe92c47c43fe0aae7900f76 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 18:04:41 +0200
Subject: [PATCH 19/72] Remove tasks

---
 tasks/README.md                               |  65 -----
 tasks/chupiflow.py                            |  69 -----
 tasks/chupiflow_ui/README.md                  |  29 --
 tasks/chupiflow_ui/__init__.py                |   0
 tasks/chupiflow_ui/app.py                     | 121 --------
 tasks/chupiflow_ui/cron.py                    |  88 ------
 tasks/chupiflow_ui/extras.py                  |  24 --
 .../static/bootstrap/css/bootstrap.min.css    |   6 -
 .../bootstrap/css/bootstrap.min.css.map       |   1 -
 .../static/bootstrap/js/bootstrap.min.js      |   7 -
 tasks/chupiflow_ui/static/favicon.ico         | Bin 3096 -> 0 bytes
 tasks/chupiflow_ui/static/style.css           | 272 ------------------
 tasks/chupiflow_ui/templates/editjob.html     |  54 ----
 tasks/chupiflow_ui/templates/file_viewer.html |  28 --
 tasks/chupiflow_ui/templates/jobs.html        |  66 -----
 tasks/chupiflow_ui/templates/layout.html      |  73 -----
 tasks/chupiflow_ui/wsgi.py                    |   4 -
 tasks/dprocess.py                             |  54 ----
 tasks/dschedule.py                            |  72 -----
 tasks/requirements.txt                        |   1 -
 tasks/scheduler.py                            | 105 -------
 21 files changed, 1139 deletions(-)
 delete mode 100644 tasks/README.md
 delete mode 100644 tasks/chupiflow.py
 delete mode 100644 tasks/chupiflow_ui/README.md
 delete mode 100644 tasks/chupiflow_ui/__init__.py
 delete mode 100644 tasks/chupiflow_ui/app.py
 delete mode 100644 tasks/chupiflow_ui/cron.py
 delete mode 100644 tasks/chupiflow_ui/extras.py
 delete mode 100644 tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css
 delete mode 100644 tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css.map
 delete mode 100644 tasks/chupiflow_ui/static/bootstrap/js/bootstrap.min.js
 delete mode 100644 tasks/chupiflow_ui/static/favicon.ico
 delete mode 100644 tasks/chupiflow_ui/static/style.css
 delete mode 100644 tasks/chupiflow_ui/templates/editjob.html
 delete mode 100644 tasks/chupiflow_ui/templates/file_viewer.html
 delete mode 100644 tasks/chupiflow_ui/templates/jobs.html
 delete mode 100644 tasks/chupiflow_ui/templates/layout.html
 delete mode 100644 tasks/chupiflow_ui/wsgi.py
 delete mode 100755 tasks/dprocess.py
 delete mode 100644 tasks/dschedule.py
 delete mode 100644 tasks/requirements.txt
 delete mode 100644 tasks/scheduler.py

diff --git a/tasks/README.md b/tasks/README.md
deleted file mode 100644
index 4009687e..00000000
--- a/tasks/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Tasks
-
-Tasks are managed by the `chupiflow.py` script and ultimately by `scheduler.py` and CronTab, thanks to `python-crontab` (full doc [here](https://gitlab.com/doctormo/python-crontab)). The script can program tasks in an automated or manual way. If done automatically, it can schedule them `@daily`, `@hourly` and `@minute`, with optional load balancing (not scheduling them all at the same time, but randomly in low load times).
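For orientation, here is a minimal sketch of the `python-crontab` calls that this scheduling boils down to. The task command, the device ID `13238` and the tabfile name are illustrative (taken from the default layout shown further down), not the exact values `scheduler.py` builds:

```
from crontab import CronTab

# Build an empty, in-memory crontab instead of touching the user's live one
cron = CronTab(tab='')

# chupiflow identifies jobs by their comment; the command here is hypothetical
job = cron.new(command='python dprocess.py --device 13238 >> 13238/13238.log 2>&1',
               comment='13238')
job.setall('@daily')

if job.is_valid():
    cron.write('tabfile.tab')  # persist the schedule to a tabfile
```

`setall()` accepts both `@daily`-style shortcuts and explicit five-field slices, which is presumably where the load-balanced variants differ: they set a concrete low-load time instead of the shortcut.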
-
-## Start scheduling
-
-This will schedule tasks based on the postprocessing information in the platform having a non-null value:
-
-```
-python chupiflow.py auto-schedule
-```
-
-or (optional dry-run for checks, force-first-run and overwriting if the task is already there):
-
-```
-python chupiflow.py auto-schedule --dry-run --force-first-run --overwrite
-```
-
-Task status and tabfile will be saved in `~/.cache/scdata/tasks` by default. This can be changed in the config:
-
-```
-➜ tasks tree -L 2
-.
-├── 13238
-│   └── 13238.log
-├── 13486
-├── README.md
-├── scheduler.log
-└── tabfile.tab
-```
-
-## Manual scheduling
-
-This will schedule a device regardless of the auto-scheduling:
-
-```
-python chupiflow.py manual-schedule --device <device-id> --dry-run --force-first-run --overwrite
-```
-
-## Local deployment
-
-You would need to replace the following lines in `tasks/chupiflow_ui/app.py` for local deployment:
-
-```
-from cron import parsetabfiles, validate, savetabfiles, triggercrontab
-from extras import get_dpath
-```
-
-Into:
-
-```
-from .cron import parsetabfiles, validate, savetabfiles, triggercrontab
-from .extras import get_dpath
-```
-
-Then you can run:
-
-```
-export FLASK_APP=app.py
-flask run
-```
-
-## Statement
-
-This code was made in response to the lack of simple task schedulers that are not designed to run on Azure, Kubernetes or whatever else (`airflow` gets the reference for the name). Nothing against it, just too complex for what could be solved with simple `cron` scheduling.
diff --git a/tasks/chupiflow.py b/tasks/chupiflow.py
deleted file mode 100644
index ce463bdf..00000000
--- a/tasks/chupiflow.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import sys
-from os.path import join
-from os import makedirs
-
-if __name__ == '__main__':
-
-    if '-h' in sys.argv or '--help' in sys.argv:
-        print('chupiflow: Process devices of the SC API')
-        print('USAGE:\n\rchupiflow.py [options] action')
-        print('options:')
-        print('--dry-run: dry run')
-        print('--force-first-run: force first time running job')
-        print('--overwrite: overwrite if it exists already')
-        print('actions: auto-schedule or manual-schedule')
-        print('auto-schedule --interval-days (config._scheduler_interval_days):')
-        print('\tschedule devices postprocessing check based on device postprocessing in platform')
-        print('\tauto-schedule makes a global task that checks on an interval-days interval; the actual tasks are then scheduled based on default intervals')
-        print('manual-schedule --device <device-id> --interval-hours (config._postprocessing_interval_hours):')
-        print('\tschedule device processing manually')
-        sys.exit()
-
-    from scheduler import Scheduler
-    from scdata._config import config
-
-    if '--dry-run' in sys.argv: dry_run = True
-    else: dry_run = False
-
-    if '--force-first-run' in sys.argv: force_first_run = True
-    else: force_first_run = False
-
-    if '--overwrite' in sys.argv: overwrite = True
-    else: overwrite = False
-
-    if 'auto-schedule' in sys.argv:
-        if '--interval-days' in sys.argv:
-            interval = int(sys.argv[sys.argv.index('--interval-days')+1])
-        else:
-            interval = config._scheduler_interval_days
-
-        s = Scheduler()
-        s.schedule_task(task = f'{config._device_scheduler}.py',
-                        log = join(config.paths['tasks'], config._scheduler_log),
-                        interval = f'{interval}D',
-                        force_first_run = force_first_run,
-                        overwrite = overwrite,
-                        dry_run = dry_run)
-        sys.exit()
-
-    if 'manual-schedule' in sys.argv:
-        if '--device' not in sys.argv:
-            print('Cannot process without a device ID')
-            sys.exit()
-        if '--interval-hours' in sys.argv:
-            interval = int(sys.argv[sys.argv.index('--interval-hours')+1])
-        else:
-            interval = config._postprocessing_interval_hours
-        # Setup scheduler
-        s = Scheduler()
-        device = int(sys.argv[sys.argv.index('--device')+1])
-        dt = join(config.paths['tasks'], str(device))
-        makedirs(dt, exist_ok=True)
-
-        s.schedule_task(task = f'{config._device_processor}.py --device {device}',
-                        log = join(dt, f'{device}.log'),
-                        interval = f'{interval}H',
-                        force_first_run = force_first_run,
-                        overwrite = overwrite,
-                        dry_run = dry_run)
-        sys.exit()
\ No newline at end of file
diff --git a/tasks/chupiflow_ui/README.md b/tasks/chupiflow_ui/README.md
deleted file mode 100644
index 91edc504..00000000
--- a/tasks/chupiflow_ui/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-A small Flask app to manage post-processing tasks with chupiflow.
-Not a production-grade web app: consider setting it up behind `nginx` with HTTPS login.
-
-## Running
-
-A service can be started in `/etc/systemd/system/chupiflow.service` using `gunicorn`. Follow [this tutorial](https://www.digitalocean.com/community/tutorials/how-to-serve-flask-applications-with-gunicorn-and-nginx-on-ubuntu-18-04) to set it up. Note that everything is stored in `/home/smartcitizen-data` and that the necessary tokens are stored in a `.env` file.
-
-```
-[Unit]
-Description=Gunicorn instance to serve chupiflow-ui
-After=network.target
-
-[Service]
-User=root
-Group=www-data
-EnvironmentFile=-/home/smartcitizen-data/.env
-WorkingDirectory=/home/smartcitizen-data/tasks/chupiflow_ui
-ExecStart=/usr/local/bin/gunicorn --workers 3 --bind unix:chupiflow.sock -m 007 wsgi:app
-
-[Install]
-WantedBy=multi-user.target
-```
-
-Normal service routines apply:
-
-```
-systemctl start chupiflow.service
-systemctl enable chupiflow.service
-```
\ No newline at end of file
diff --git a/tasks/chupiflow_ui/__init__.py b/tasks/chupiflow_ui/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tasks/chupiflow_ui/app.py b/tasks/chupiflow_ui/app.py
deleted file mode 100644
index f6da5579..00000000
--- a/tasks/chupiflow_ui/app.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-
-from flask import Flask, request, render_template, redirect, url_for
-import json
-from cron import parsetabfiles, validate, savetabfiles, triggercrontab
-from extras import get_dpath
-
-app = Flask(__name__)
-
-dpath=None
-cronthread = {}
-
-@app.route('/', methods = ['GET', 'POST'])
-def default():
-    global dpath
-    error = None
-    if request.method == 'POST':
-        request.get_data()
-        dpath = request.form['path-tab-file']
-        if not dpath: error = 'Not a valid path'
-    else:
-        if dpath == '' or dpath is None: dpath=get_dpath()
-    tabfiles = parsetabfiles(path=dpath)
-    return render_template("jobs.html", tabfiles=tabfiles, defaultpath=dpath, error=error)
-
-@app.route('/editjob/<tabfile>-<cron>', methods = ['POST', 'GET'])
-def editjob(tabfile,cron,error=None):
-    tabfiles=parsetabfiles(path=dpath)
-    if request.method == 'POST':
-        request.get_data()
-        # Form input
-        log=request.form["logfile-input"]
-        task=request.form["task-input"]
-        schedule=request.form["schedule-input"]
-        if request.form.getlist("enabled-input") == ['on']: enabled=True
-        else: enabled=False
-        who=request.form["who-input"]
-        # Validate
-        error=validate(schedule, who, task, log)
-        tabfiles[tabfile][cron]['logfile']=log
-        tabfiles[tabfile][cron]['task']=task
-        tabfiles[tabfile][cron]['schedule']=schedule
-        tabfiles[tabfile][cron]['enabled']=enabled
-        tabfiles[tabfile][cron]['who']=who
-        # Check error
-        if not error:
-            savetabfiles(tabfiles=tabfiles, path=dpath)
-            return redirect(url_for("default"))
-    # Overwrite schedule to cope with lists or normal strings options
-    tabfiles[tabfile][cron]['schedule'] = str(tabfiles[tabfile][cron]['schedule'])
-    crondict=tabfiles[tabfile][cron]
-
-    return render_template("editjob.html", tabfile=tabfile, cron=cron, crondict=crondict, error=error)
-
-@app.route('/triggerjob/<tabfile>-<cron>', methods = ['POST'])
-def triggerjob(tabfile,cron):
-    global cronthread
-    tabfiles=parsetabfiles(path=dpath)
-    if request.method == 'POST':
-        cronthread[cron] = triggercrontab(dpath,tabfile,cron)
-        if cronthread[cron] == False:
-            error = "Job could not run as it's not valid"
-            return render_template("jobs.html", tabfiles=tabfiles, defaultpath=dpath, error=error)
-        else:
-            return redirect(url_for("logfile", tabfile=tabfile, cron=cron))
-
-@app.route('/tabfiles/<tabfile>')
-def tabfile(tabfile):
-    tabfiles = parsetabfiles(path=dpath)
-    tabpath = f"{dpath}/{tabfile}.tab"
-    tab = []
-    with open(tabpath, 'r') as file:
-        _tab = file.readlines()
-        for line in _tab:
-            line = line.strip('\n')
-            if line != '':
-                tab.append(line)
-    return render_template("file_viewer.html", file_type='tabfile', file=tab)
-
-@app.route('/logfiles/<tabfile>-<cron>')
-def logfile(tabfile, cron):
-    global cronthread
-    tabfiles = parsetabfiles(path=dpath)
-    logfile = tabfiles[tabfile][cron]['logfile']
-    log = []
-    with open(logfile, 'r') as file:
-        _log = file.readlines()
-        for line in _log:
-            line = line.strip('\n')
-            if line != '':
-                if '[31m' in line:
-                    line = line.replace('\x1b[31m', '')
-                    line = line.replace('\x1b[0m', '')
-                if '[33m' in line:
-                    line = line.replace('\x1b[33m', '')
-                    line = line.replace('\x1b[0m', '')
-                if '[32m' in line:
-                    line = line.replace('\x1b[32m', '')
-                    line = line.replace('\x1b[0m', '')
-                log.append(line)
-    if cron in cronthread:
-        status = cronthread[cron].status
-    else:
-        status = 'Not running'
-    return render_template("file_viewer.html", file_type='log', cron=cron, file=log, status = status)
-
-@app.route('/jobfiles/<tabfile>-<cron>')
-def taskfile(tabfile, cron):
-    tabfiles = parsetabfiles(path=dpath)
-    taskfile = tabfiles[tabfile][cron]['task'].split(' ')[0]
-    task = []
-    with open(taskfile, 'r') as file:
-        _task = file.readlines()
-        for line in _task:
-            line = line.strip('\n')
-            if line != '':
-                task.append(line)
-    return render_template("file_viewer.html", file_type='task', cron=cron, file=task)
-
-if __name__ == '__main__':
-    app.run(debug = True)
diff --git a/tasks/chupiflow_ui/cron.py b/tasks/chupiflow_ui/cron.py
deleted file mode 100644
index 60d33432..00000000
--- a/tasks/chupiflow_ui/cron.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from os import listdir
-from os.path import join
-import traceback
-import sys
-import subprocess
-from crontab import CronTab
-import threading
-
-class CronThread(threading.Thread):
-    def __init__(self, job):
-        self.job = job
-        super().__init__()
-        self.status = 'init'
-
-    def run(self):
-        self.status = 'running'
-        self.job.run()
-        self.status = 'done'
-
-def parsetabfiles(path):
-    tabfiles = {}
-    try:
-        for tabfile in listdir(path):
-            if tabfile.endswith('.tab'):
-                tname = tabfile.replace('.tab', '')
-                tabfiles[tname]=dict()
-                jobs=CronTab(tabfile=join(path, tabfile))
-
-                for job in jobs:
-                    tabfiles[tname][job.comment]=dict()
-                    tabfiles[tname][job.comment]['schedule']=job.slices
-                    tabfiles[tname][job.comment]['enabled']=job.is_enabled()
-                    tabfiles[tname][job.comment]['valid']=job.is_valid()
-                    cl = job.command.split(' ')
-                    tabfiles[tname][job.comment]['who']=cl[0]
-                    tabfiles[tname][job.comment]['task']=' '.join(cl[1: cl.index('>>')])
-                    tabfiles[tname][job.comment]['logfile']=cl[cl.index('>>')+1:-1][0]
-        return tabfiles
-    except IOError:
-        traceback.print_exc()
-        pass
-    return "Unable to read file"
-
-def validate(schedule, who, task, log):
-    c=CronTab(user=True)
-
-    command = f"{who} {task} >> {log} 2>&1"
-    j=c.new(command=command)
-
-    if not j.is_valid(): return 'Command error'
-
-    try:
-        j.setall(schedule)
-    except ValueError:
-        return 'Time slice error'
-
-    return None
-
-def triggercrontab(path,tabfile,cron):
-    print (f'Triggering {cron} from {tabfile}')
-    jobs=CronTab(tabfile=join(path, tabfile+'.tab'))
-
-    for job in jobs:
-        if job.comment==cron:
-            if job.is_valid():
-                ct = CronThread(job)
-                ct.start()
-                return ct
-            else:
-                return False
-
-def savetabfiles(tabfiles, path):
-    for tabfile in tabfiles:
-        output = []
-        for job in tabfiles[tabfile]:
-            if tabfiles[tabfile][job]['enabled']: enable = ''
-            else: enable = '# '
-            line = f"{enable}{tabfiles[tabfile][job]['schedule']} {sys.executable} {tabfiles[tabfile][job]['task']} >> {tabfiles[tabfile][job]['logfile']} 2>&1 # {job}"
-            output.append(line)
-
-        outputfile=open(join(path, f"{tabfile}.tab"),'w')
-        for line in output:
-            outputfile.write(line)
-            outputfile.write('\n')
-        outputfile.close()
-        subprocess.call(['crontab', join(path, f"{tabfile}.tab")])
-    return 'What a mess of an edit you have made'
\ No newline at end of file
diff --git a/tasks/chupiflow_ui/extras.py b/tasks/chupiflow_ui/extras.py
deleted file mode 100644
index ccdae284..00000000
--- a/tasks/chupiflow_ui/extras.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from os import environ, name
-from os.path import expanduser, join, isdir
-
-def get_dpath():
-
-    # Check if windows
-    _mswin = name == "nt"
-    # Get user_home
-    _user_home = expanduser("~")
-
-    # Get .cache dir - maybe change it if found in config.json
-    if _mswin:
-        _ddir = environ["APPDATA"]
-    elif 'XDG_CACHE_HOME' in environ:
-        _ddir = environ['XDG_CACHE_HOME']
-    else:
-        _ddir = join(expanduser("~"), '.cache')
-
-    dpath = join(_ddir, 'scdata', 'tasks')
-
-    return dpath
-
-def check_path(path):
-    return isdir(path)
\ No newline at end of file
diff --git a/tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css b/tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css
deleted file mode 100644
index a8da0748..00000000
--- a/tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v4.0.0-alpha.6 (https://getbootstrap.com)
- * Copyright 2011-2017 The Bootstrap Authors
- * Copyright 2011-2017 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
- */
.form-check-label{vertical-align:middle}.form-check-inline+.form-check-inline{margin-left:.75rem}.form-control-feedback{margin-top:.25rem}.form-control-danger,.form-control-success,.form-control-warning{padding-right:2.25rem;background-repeat:no-repeat;background-position:center right .5625rem;-webkit-background-size:1.125rem 1.125rem;background-size:1.125rem 1.125rem}.has-success .col-form-label,.has-success .custom-control,.has-success .form-check-label,.has-success .form-control-feedback,.has-success .form-control-label{color:#5cb85c}.has-success .form-control{border-color:#5cb85c}.has-success .input-group-addon{color:#5cb85c;border-color:#5cb85c;background-color:#eaf6ea}.has-success .form-control-success{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%235cb85c' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E")}.has-warning .col-form-label,.has-warning .custom-control,.has-warning .form-check-label,.has-warning .form-control-feedback,.has-warning .form-control-label{color:#f0ad4e}.has-warning .form-control{border-color:#f0ad4e}.has-warning .input-group-addon{color:#f0ad4e;border-color:#f0ad4e;background-color:#fff}.has-warning .form-control-warning{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23f0ad4e' d='M4.4 5.324h-.8v-2.46h.8zm0 1.42h-.8V5.89h.8zM3.76.63L.04 7.075c-.115.2.016.425.26.426h7.397c.242 0 .372-.226.258-.426C6.726 4.924 5.47 2.79 4.253.63c-.113-.174-.39-.174-.494 0z'/%3E%3C/svg%3E")}.has-danger .col-form-label,.has-danger .custom-control,.has-danger .form-check-label,.has-danger .form-control-feedback,.has-danger .form-control-label{color:#d9534f}.has-danger .form-control{border-color:#d9534f}.has-danger .input-group-addon{color:#d9534f;border-color:#d9534f;background-color:#fdf7f7}.has-danger .form-control-danger{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23d9534f' viewBox='-2 -2 7 7'%3E%3Cpath stroke='%23d9534f' d='M0 0l3 3m0-3L0 3'/%3E%3Ccircle r='.5'/%3E%3Ccircle cx='3' r='.5'/%3E%3Ccircle cy='3' r='.5'/%3E%3Ccircle cx='3' cy='3' r='.5'/%3E%3C/svg%3E")}.form-inline{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.form-inline .form-check{width:100%}@media (min-width:576px){.form-inline label{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center;margin-bottom:0}.form-inline .form-group{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-flex:0;-webkit-flex:0 0 auto;-ms-flex:0 0 auto;flex:0 0 auto;-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center;margin-bottom:0}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{width:auto}.form-inline .form-control-label{margin-bottom:0;vertical-align:middle}.form-inline 
.form-check{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center;width:auto;margin-top:0;margin-bottom:0}.form-inline .form-check-label{padding-left:0}.form-inline .form-check-input{position:relative;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center;padding-left:0}.form-inline .custom-control-indicator{position:static;display:inline-block;margin-right:.25rem;vertical-align:text-bottom}.form-inline .has-feedback .form-control-feedback{top:0}}.btn{display:inline-block;font-weight:400;line-height:1.25;text-align:center;white-space:nowrap;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid transparent;padding:.5rem 1rem;font-size:1rem;border-radius:.25rem;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.btn:focus,.btn:hover{text-decoration:none}.btn.focus,.btn:focus{outline:0;-webkit-box-shadow:0 0 0 2px rgba(2,117,216,.25);box-shadow:0 0 0 2px rgba(2,117,216,.25)}.btn.disabled,.btn:disabled{cursor:not-allowed;opacity:.65}.btn.active,.btn:active{background-image:none}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#0275d8;border-color:#0275d8}.btn-primary:hover{color:#fff;background-color:#025aa5;border-color:#01549b}.btn-primary.focus,.btn-primary:focus{-webkit-box-shadow:0 0 0 2px rgba(2,117,216,.5);box-shadow:0 0 0 2px rgba(2,117,216,.5)}.btn-primary.disabled,.btn-primary:disabled{background-color:#0275d8;border-color:#0275d8}.btn-primary.active,.btn-primary:active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#025aa5;background-image:none;border-color:#01549b}.btn-secondary{color:#292b2c;background-color:#fff;border-color:#ccc}.btn-secondary:hover{color:#292b2c;background-color:#e6e6e6;border-color:#adadad}.btn-secondary.focus,.btn-secondary:focus{-webkit-box-shadow:0 0 0 2px rgba(204,204,204,.5);box-shadow:0 0 0 2px rgba(204,204,204,.5)}.btn-secondary.disabled,.btn-secondary:disabled{background-color:#fff;border-color:#ccc}.btn-secondary.active,.btn-secondary:active,.show>.btn-secondary.dropdown-toggle{color:#292b2c;background-color:#e6e6e6;background-image:none;border-color:#adadad}.btn-info{color:#fff;background-color:#5bc0de;border-color:#5bc0de}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#2aabd2}.btn-info.focus,.btn-info:focus{-webkit-box-shadow:0 0 0 2px rgba(91,192,222,.5);box-shadow:0 0 0 2px rgba(91,192,222,.5)}.btn-info.disabled,.btn-info:disabled{background-color:#5bc0de;border-color:#5bc0de}.btn-info.active,.btn-info:active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#31b0d5;background-image:none;border-color:#2aabd2}.btn-success{color:#fff;background-color:#5cb85c;border-color:#5cb85c}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#419641}.btn-success.focus,.btn-success:focus{-webkit-box-shadow:0 0 0 2px rgba(92,184,92,.5);box-shadow:0 0 0 2px 
rgba(92,184,92,.5)}.btn-success.disabled,.btn-success:disabled{background-color:#5cb85c;border-color:#5cb85c}.btn-success.active,.btn-success:active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#449d44;background-image:none;border-color:#419641}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#f0ad4e}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#eb9316}.btn-warning.focus,.btn-warning:focus{-webkit-box-shadow:0 0 0 2px rgba(240,173,78,.5);box-shadow:0 0 0 2px rgba(240,173,78,.5)}.btn-warning.disabled,.btn-warning:disabled{background-color:#f0ad4e;border-color:#f0ad4e}.btn-warning.active,.btn-warning:active,.show>.btn-warning.dropdown-toggle{color:#fff;background-color:#ec971f;background-image:none;border-color:#eb9316}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d9534f}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#c12e2a}.btn-danger.focus,.btn-danger:focus{-webkit-box-shadow:0 0 0 2px rgba(217,83,79,.5);box-shadow:0 0 0 2px rgba(217,83,79,.5)}.btn-danger.disabled,.btn-danger:disabled{background-color:#d9534f;border-color:#d9534f}.btn-danger.active,.btn-danger:active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#c9302c;background-image:none;border-color:#c12e2a}.btn-outline-primary{color:#0275d8;background-image:none;background-color:transparent;border-color:#0275d8}.btn-outline-primary:hover{color:#fff;background-color:#0275d8;border-color:#0275d8}.btn-outline-primary.focus,.btn-outline-primary:focus{-webkit-box-shadow:0 0 0 2px rgba(2,117,216,.5);box-shadow:0 0 0 2px rgba(2,117,216,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#0275d8;background-color:transparent}.btn-outline-primary.active,.btn-outline-primary:active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#0275d8;border-color:#0275d8}.btn-outline-secondary{color:#ccc;background-image:none;background-color:transparent;border-color:#ccc}.btn-outline-secondary:hover{color:#fff;background-color:#ccc;border-color:#ccc}.btn-outline-secondary.focus,.btn-outline-secondary:focus{-webkit-box-shadow:0 0 0 2px rgba(204,204,204,.5);box-shadow:0 0 0 2px rgba(204,204,204,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#ccc;background-color:transparent}.btn-outline-secondary.active,.btn-outline-secondary:active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#ccc;border-color:#ccc}.btn-outline-info{color:#5bc0de;background-image:none;background-color:transparent;border-color:#5bc0de}.btn-outline-info:hover{color:#fff;background-color:#5bc0de;border-color:#5bc0de}.btn-outline-info.focus,.btn-outline-info:focus{-webkit-box-shadow:0 0 0 2px rgba(91,192,222,.5);box-shadow:0 0 0 2px rgba(91,192,222,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#5bc0de;background-color:transparent}.btn-outline-info.active,.btn-outline-info:active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#5bc0de;border-color:#5bc0de}.btn-outline-success{color:#5cb85c;background-image:none;background-color:transparent;border-color:#5cb85c}.btn-outline-success:hover{color:#fff;background-color:#5cb85c;border-color:#5cb85c}.btn-outline-success.focus,.btn-outline-success:focus{-webkit-box-shadow:0 0 0 2px rgba(92,184,92,.5);box-shadow:0 0 0 2px 
rgba(92,184,92,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#5cb85c;background-color:transparent}.btn-outline-success.active,.btn-outline-success:active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#5cb85c;border-color:#5cb85c}.btn-outline-warning{color:#f0ad4e;background-image:none;background-color:transparent;border-color:#f0ad4e}.btn-outline-warning:hover{color:#fff;background-color:#f0ad4e;border-color:#f0ad4e}.btn-outline-warning.focus,.btn-outline-warning:focus{-webkit-box-shadow:0 0 0 2px rgba(240,173,78,.5);box-shadow:0 0 0 2px rgba(240,173,78,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#f0ad4e;background-color:transparent}.btn-outline-warning.active,.btn-outline-warning:active,.show>.btn-outline-warning.dropdown-toggle{color:#fff;background-color:#f0ad4e;border-color:#f0ad4e}.btn-outline-danger{color:#d9534f;background-image:none;background-color:transparent;border-color:#d9534f}.btn-outline-danger:hover{color:#fff;background-color:#d9534f;border-color:#d9534f}.btn-outline-danger.focus,.btn-outline-danger:focus{-webkit-box-shadow:0 0 0 2px rgba(217,83,79,.5);box-shadow:0 0 0 2px rgba(217,83,79,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#d9534f;background-color:transparent}.btn-outline-danger.active,.btn-outline-danger:active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#d9534f;border-color:#d9534f}.btn-link{font-weight:400;color:#0275d8;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link:disabled{background-color:transparent}.btn-link,.btn-link:active,.btn-link:focus{border-color:transparent}.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#014c8c;text-decoration:underline;background-color:transparent}.btn-link:disabled{color:#636c72}.btn-link:disabled:focus,.btn-link:disabled:hover{text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:.75rem 1.5rem;font-size:1.25rem;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:.5rem}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.show{opacity:1}.collapse{display:none}.collapse.show{display:block}tr.collapse.show{display:table-row}tbody.collapse.show{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.dropdown,.dropup{position:relative}.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.3em;vertical-align:middle;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-left:.3em solid transparent}.dropdown-toggle:focus{outline:0}.dropup .dropdown-toggle::after{border-top:0;border-bottom:.3em solid}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 0;font-size:1rem;color:#292b2c;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.dropdown-divider{height:1px;margin:.5rem 0;overflow:hidden;background-color:#eceeef}.dropdown-item{display:block;width:100%;padding:3px 
1.5rem;clear:both;font-weight:400;color:#292b2c;text-align:inherit;white-space:nowrap;background:0 0;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#1d1e1f;text-decoration:none;background-color:#f7f7f9}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#0275d8}.dropdown-item.disabled,.dropdown-item:disabled{color:#636c72;cursor:not-allowed;background-color:transparent}.show>.dropdown-menu{display:block}.show>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#636c72;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.dropup .dropdown-menu{top:auto;bottom:100%;margin-bottom:.125rem}.btn-group,.btn-group-vertical{position:relative;display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;-webkit-box-flex:0;-webkit-flex:0 1 auto;-ms-flex:0 1 auto;flex:0 1 auto}.btn-group-vertical>.btn:hover,.btn-group>.btn:hover{z-index:2}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group,.btn-group-vertical .btn+.btn,.btn-group-vertical .btn+.btn-group,.btn-group-vertical .btn-group+.btn,.btn-group-vertical .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-pack:start;-webkit-justify-content:flex-start;-ms-flex-pack:start;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open 
.dropdown-toggle{outline:0}.btn+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn+.dropdown-toggle-split::after{margin-left:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:1.125rem;padding-left:1.125rem}.btn-group-vertical{display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column;-webkit-box-align:start;-webkit-align-items:flex-start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:center;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center}.btn-group-vertical .btn,.btn-group-vertical .btn-group{width:100%}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-right-radius:0;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;width:100%}.input-group .form-control{position:relative;z-index:2;-webkit-box-flex:1;-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto;width:1%;margin-bottom:0}.input-group .form-control:active,.input-group .form-control:focus,.input-group .form-control:hover{z-index:3}.input-group .form-control,.input-group-addon,.input-group-btn{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column;-webkit-box-pack:center;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{white-space:nowrap;vertical-align:middle}.input-group-addon{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.25;color:#464a4c;text-align:center;background-color:#eceeef;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.input-group-addon.form-control-sm,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.input-group-addon.btn{padding:.25rem 
.5rem;font-size:.875rem;border-radius:.2rem}.input-group-addon.form-control-lg,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.input-group-addon.btn{padding:.75rem 1.5rem;font-size:1.25rem;border-radius:.3rem}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:not(:last-child),.input-group-addon:not(:last-child),.input-group-btn:not(:first-child)>.btn-group:not(:last-child)>.btn,.input-group-btn:not(:first-child)>.btn:not(:last-child):not(.dropdown-toggle),.input-group-btn:not(:last-child)>.btn,.input-group-btn:not(:last-child)>.btn-group>.btn,.input-group-btn:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-top-right-radius:0}.input-group-addon:not(:last-child){border-right:0}.input-group .form-control:not(:first-child),.input-group-addon:not(:first-child),.input-group-btn:not(:first-child)>.btn,.input-group-btn:not(:first-child)>.btn-group>.btn,.input-group-btn:not(:first-child)>.dropdown-toggle,.input-group-btn:not(:last-child)>.btn-group:not(:first-child)>.btn,.input-group-btn:not(:last-child)>.btn:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.form-control+.input-group-addon:not(:first-child){border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative;-webkit-box-flex:1;-webkit-flex:1 1 0%;-ms-flex:1 1 0%;flex:1 1 0%}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:3}.input-group-btn:not(:last-child)>.btn,.input-group-btn:not(:last-child)>.btn-group{margin-right:-1px}.input-group-btn:not(:first-child)>.btn,.input-group-btn:not(:first-child)>.btn-group{z-index:2;margin-left:-1px}.input-group-btn:not(:first-child)>.btn-group:active,.input-group-btn:not(:first-child)>.btn-group:focus,.input-group-btn:not(:first-child)>.btn-group:hover,.input-group-btn:not(:first-child)>.btn:active,.input-group-btn:not(:first-child)>.btn:focus,.input-group-btn:not(:first-child)>.btn:hover{z-index:3}.custom-control{position:relative;display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;min-height:1.5rem;padding-left:1.5rem;margin-right:1rem;cursor:pointer}.custom-control-input{position:absolute;z-index:-1;opacity:0}.custom-control-input:checked~.custom-control-indicator{color:#fff;background-color:#0275d8}.custom-control-input:focus~.custom-control-indicator{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 3px #0275d8;box-shadow:0 0 0 1px #fff,0 0 0 3px #0275d8}.custom-control-input:active~.custom-control-indicator{color:#fff;background-color:#8fcafe}.custom-control-input:disabled~.custom-control-indicator{cursor:not-allowed;background-color:#eceeef}.custom-control-input:disabled~.custom-control-description{color:#636c72;cursor:not-allowed}.custom-control-indicator{position:absolute;top:.25rem;left:0;display:block;width:1rem;height:1rem;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#ddd;background-repeat:no-repeat;background-position:center center;-webkit-background-size:50% 50%;background-size:50% 50%}.custom-checkbox .custom-control-indicator{border-radius:.25rem}.custom-checkbox .custom-control-input:checked~.custom-control-indicator{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 
2.974 7.25 8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate~.custom-control-indicator{background-color:#0275d8;background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-radio .custom-control-indicator{border-radius:50%}.custom-radio .custom-control-input:checked~.custom-control-indicator{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-controls-stacked{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column}.custom-controls-stacked .custom-control{margin-bottom:.25rem}.custom-controls-stacked .custom-control+.custom-control{margin-left:0}.custom-select{display:inline-block;max-width:100%;height:calc(2.25rem + 2px);padding:.375rem 1.75rem .375rem .75rem;line-height:1.25;color:#464a4c;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23333' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center;-webkit-background-size:8px 10px;background-size:8px 10px;border:1px solid rgba(0,0,0,.15);border-radius:.25rem;-moz-appearance:none;-webkit-appearance:none}.custom-select:focus{border-color:#5cb3fd;outline:0}.custom-select:focus::-ms-value{color:#464a4c;background-color:#fff}.custom-select:disabled{color:#636c72;cursor:not-allowed;background-color:#eceeef}.custom-select::-ms-expand{opacity:0}.custom-select-sm{padding-top:.375rem;padding-bottom:.375rem;font-size:75%}.custom-file{position:relative;display:inline-block;max-width:100%;height:2.5rem;margin-bottom:0;cursor:pointer}.custom-file-input{min-width:14rem;max-width:100%;height:2.5rem;margin:0;filter:alpha(opacity=0);opacity:0}.custom-file-control{position:absolute;top:0;right:0;left:0;z-index:5;height:2.5rem;padding:.5rem 1rem;line-height:1.5;color:#464a4c;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#fff;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.custom-file-control:lang(en)::after{content:"Choose file..."}.custom-file-control::before{position:absolute;top:-1px;right:-1px;bottom:-1px;z-index:6;display:block;height:2.5rem;padding:.5rem 1rem;line-height:1.5;color:#464a4c;background-color:#eceeef;border:1px solid rgba(0,0,0,.15);border-radius:0 .25rem .25rem 0}.custom-file-control:lang(en)::before{content:"Browse"}.nav{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5em 1em}.nav-link:focus,.nav-link:hover{text-decoration:none}.nav-link.disabled{color:#636c72;cursor:not-allowed}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-right-radius:.25rem;border-top-left-radius:.25rem}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:#eceeef #eceeef #ddd}.nav-tabs .nav-link.disabled{color:#636c72;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#464a4c;background-color:#fff;border-color:#ddd #ddd #fff}.nav-tabs 
.dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.nav-pills .nav-link{border-radius:.25rem}.nav-pills .nav-item.show .nav-link,.nav-pills .nav-link.active{color:#fff;cursor:default;background-color:#0275d8}.nav-fill .nav-item{-webkit-box-flex:1;-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto;text-align:center}.nav-justified .nav-item{-webkit-box-flex:1;-webkit-flex:1 1 100%;-ms-flex:1 1 100%;flex:1 1 100%;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column;padding:.5rem 1rem}.navbar-brand{display:inline-block;padding-top:.25rem;padding-bottom:.25rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-nav{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-text{display:inline-block;padding-top:.425rem;padding-bottom:.425rem}.navbar-toggler{-webkit-align-self:flex-start;-ms-flex-item-align:start;align-self:flex-start;padding:.25rem .75rem;font-size:1.25rem;line-height:1;background:0 0;border:1px solid transparent;border-radius:.25rem}.navbar-toggler:focus,.navbar-toggler:hover{text-decoration:none}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat center center;-webkit-background-size:100% 100%;background-size:100% 100%}.navbar-toggler-left{position:absolute;left:1rem}.navbar-toggler-right{position:absolute;right:1rem}@media (max-width:575px){.navbar-toggleable .navbar-nav .dropdown-menu{position:static;float:none}.navbar-toggleable>.container{padding-right:0;padding-left:0}}@media (min-width:576px){.navbar-toggleable{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row}.navbar-toggleable .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-toggleable>.container{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable .navbar-collapse{display:-webkit-box!important;display:-webkit-flex!important;display:-ms-flexbox!important;display:flex!important;width:100%}.navbar-toggleable .navbar-toggler{display:none}}@media (max-width:767px){.navbar-toggleable-sm .navbar-nav .dropdown-menu{position:static;float:none}.navbar-toggleable-sm>.container{padding-right:0;padding-left:0}}@media 
(min-width:768px){.navbar-toggleable-sm{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-sm .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row}.navbar-toggleable-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-toggleable-sm>.container{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-sm .navbar-collapse{display:-webkit-box!important;display:-webkit-flex!important;display:-ms-flexbox!important;display:flex!important;width:100%}.navbar-toggleable-sm .navbar-toggler{display:none}}@media (max-width:991px){.navbar-toggleable-md .navbar-nav .dropdown-menu{position:static;float:none}.navbar-toggleable-md>.container{padding-right:0;padding-left:0}}@media (min-width:992px){.navbar-toggleable-md{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-md .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row}.navbar-toggleable-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-toggleable-md>.container{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-md .navbar-collapse{display:-webkit-box!important;display:-webkit-flex!important;display:-ms-flexbox!important;display:flex!important;width:100%}.navbar-toggleable-md .navbar-toggler{display:none}}@media (max-width:1199px){.navbar-toggleable-lg .navbar-nav .dropdown-menu{position:static;float:none}.navbar-toggleable-lg>.container{padding-right:0;padding-left:0}}@media (min-width:1200px){.navbar-toggleable-lg{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-lg .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row}.navbar-toggleable-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-toggleable-lg>.container{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-lg .navbar-collapse{display:-webkit-box!important;display:-webkit-flex!important;display:-ms-flexbox!important;display:flex!important;width:100%}.navbar-toggleable-lg 
.navbar-toggler{display:none}}.navbar-toggleable-xl{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-xl .navbar-nav .dropdown-menu{position:static;float:none}.navbar-toggleable-xl>.container{padding-right:0;padding-left:0}.navbar-toggleable-xl .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row}.navbar-toggleable-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-toggleable-xl>.container{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:nowrap;-ms-flex-wrap:nowrap;flex-wrap:nowrap;-webkit-box-align:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center}.navbar-toggleable-xl .navbar-collapse{display:-webkit-box!important;display:-webkit-flex!important;display:-ms-flexbox!important;display:flex!important;width:100%}.navbar-toggleable-xl .navbar-toggler{display:none}.navbar-light .navbar-brand,.navbar-light .navbar-toggler{color:rgba(0,0,0,.9)}.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover,.navbar-light .navbar-toggler:focus,.navbar-light .navbar-toggler:hover{color:rgba(0,0,0,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,.5)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(0,0,0,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,.3)}.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .nav-link.open,.navbar-light .navbar-nav .open>.nav-link{color:rgba(0,0,0,.9)}.navbar-light .navbar-toggler{border-color:rgba(0,0,0,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,.5)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-toggler{color:#fff}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-toggler:focus,.navbar-inverse .navbar-toggler:hover{color:#fff}.navbar-inverse .navbar-nav .nav-link{color:rgba(255,255,255,.5)}.navbar-inverse .navbar-nav .nav-link:focus,.navbar-inverse .navbar-nav .nav-link:hover{color:rgba(255,255,255,.75)}.navbar-inverse .navbar-nav .nav-link.disabled{color:rgba(255,255,255,.25)}.navbar-inverse .navbar-nav .active>.nav-link,.navbar-inverse .navbar-nav .nav-link.active,.navbar-inverse .navbar-nav .nav-link.open,.navbar-inverse .navbar-nav .open>.nav-link{color:#fff}.navbar-inverse .navbar-toggler{border-color:rgba(255,255,255,.1)}.navbar-inverse .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E%3C/svg%3E")}.navbar-inverse 
.navbar-text{color:rgba(255,255,255,.5)}.card{position:relative;display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column;background-color:#fff;border:1px solid rgba(0,0,0,.125);border-radius:.25rem}.card-block{-webkit-box-flex:1;-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card>.list-group:first-child .list-group-item:first-child{border-top-right-radius:.25rem;border-top-left-radius:.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-header{padding:.75rem 1.25rem;margin-bottom:0;background-color:#f7f7f9;border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-footer{padding:.75rem 1.25rem;background-color:#f7f7f9;border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-.625rem;margin-bottom:-.75rem;margin-left:-.625rem;border-bottom:0}.card-header-pills{margin-right:-.625rem;margin-left:-.625rem}.card-primary{background-color:#0275d8;border-color:#0275d8}.card-primary .card-footer,.card-primary .card-header{background-color:transparent}.card-success{background-color:#5cb85c;border-color:#5cb85c}.card-success .card-footer,.card-success .card-header{background-color:transparent}.card-info{background-color:#5bc0de;border-color:#5bc0de}.card-info .card-footer,.card-info .card-header{background-color:transparent}.card-warning{background-color:#f0ad4e;border-color:#f0ad4e}.card-warning .card-footer,.card-warning .card-header{background-color:transparent}.card-danger{background-color:#d9534f;border-color:#d9534f}.card-danger .card-footer,.card-danger .card-header{background-color:transparent}.card-outline-primary{background-color:transparent;border-color:#0275d8}.card-outline-secondary{background-color:transparent;border-color:#ccc}.card-outline-info{background-color:transparent;border-color:#5bc0de}.card-outline-success{background-color:transparent;border-color:#5cb85c}.card-outline-warning{background-color:transparent;border-color:#f0ad4e}.card-outline-danger{background-color:transparent;border-color:#d9534f}.card-inverse{color:rgba(255,255,255,.65)}.card-inverse .card-footer,.card-inverse .card-header{background-color:transparent;border-color:rgba(255,255,255,.2)}.card-inverse .card-blockquote,.card-inverse .card-footer,.card-inverse .card-header,.card-inverse .card-title{color:#fff}.card-inverse .card-blockquote .blockquote-footer,.card-inverse .card-link,.card-inverse .card-subtitle,.card-inverse .card-text{color:rgba(255,255,255,.65)}.card-inverse .card-link:focus,.card-inverse .card-link:hover{color:#fff}.card-blockquote{padding:0;margin-bottom:0;border-left:0}.card-img{border-radius:calc(.25rem - 1px)}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img-top{border-top-right-radius:calc(.25rem - 1px);border-top-left-radius:calc(.25rem - 1px)}.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}@media 
(min-width:576px){.card-deck{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap}.card-deck .card{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-box-flex:1;-webkit-flex:1 0 0%;-ms-flex:1 0 0%;flex:1 0 0%;-webkit-box-orient:vertical;-webkit-box-direction:normal;-webkit-flex-direction:column;-ms-flex-direction:column;flex-direction:column}.card-deck .card:not(:first-child){margin-left:15px}.card-deck .card:not(:last-child){margin-right:15px}}@media (min-width:576px){.card-group{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap}.card-group .card{-webkit-box-flex:1;-webkit-flex:1 0 0%;-ms-flex:1 0 0%;flex:1 0 0%}.card-group .card+.card{margin-left:0;border-left:0}.card-group .card:first-child{border-bottom-right-radius:0;border-top-right-radius:0}.card-group .card:first-child .card-img-top{border-top-right-radius:0}.card-group .card:first-child .card-img-bottom{border-bottom-right-radius:0}.card-group .card:last-child{border-bottom-left-radius:0;border-top-left-radius:0}.card-group .card:last-child .card-img-top{border-top-left-radius:0}.card-group .card:last-child .card-img-bottom{border-bottom-left-radius:0}.card-group .card:not(:first-child):not(:last-child){border-radius:0}.card-group .card:not(:first-child):not(:last-child) .card-img-bottom,.card-group .card:not(:first-child):not(:last-child) .card-img-top{border-radius:0}}@media (min-width:576px){.card-columns{-webkit-column-count:3;-moz-column-count:3;column-count:3;-webkit-column-gap:1.25rem;-moz-column-gap:1.25rem;column-gap:1.25rem}.card-columns .card{display:inline-block;width:100%;margin-bottom:.75rem}}.breadcrumb{padding:.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#eceeef;border-radius:.25rem}.breadcrumb::after{display:block;content:"";clear:both}.breadcrumb-item{float:left}.breadcrumb-item+.breadcrumb-item::before{display:inline-block;padding-right:.5rem;padding-left:.5rem;color:#636c72;content:"/"}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:none}.breadcrumb-item.active{color:#636c72}.pagination{display:-webkit-box;display:-webkit-flex;display:-ms-flexbox;display:flex;padding-left:0;list-style:none;border-radius:.25rem}.page-item:first-child .page-link{margin-left:0;border-bottom-left-radius:.25rem;border-top-left-radius:.25rem}.page-item:last-child .page-link{border-bottom-right-radius:.25rem;border-top-right-radius:.25rem}.page-item.active .page-link{z-index:2;color:#fff;background-color:#0275d8;border-color:#0275d8}.page-item.disabled .page-link{color:#636c72;pointer-events:none;cursor:not-allowed;background-color:#fff;border-color:#ddd}.page-link{position:relative;display:block;padding:.5rem .75rem;margin-left:-1px;line-height:1.25;color:#0275d8;background-color:#fff;border:1px solid #ddd}.page-link:focus,.page-link:hover{color:#014c8c;text-decoration:none;background-color:#eceeef;border-color:#ddd}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem}.pagination-lg .page-item:first-child .page-link{border-bottom-left-radius:.3rem;border-top-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-bottom-right-radius:.3rem;border-top-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem}.pagination-sm .page-item:first-child 
3rem!important}.pt-xl-5{padding-top:3rem!important}.pr-xl-5{padding-right:3rem!important}.pb-xl-5{padding-bottom:3rem!important}.pl-xl-5{padding-left:3rem!important}.px-xl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xl-5{padding-top:3rem!important;padding-bottom:3rem!important}.m-xl-auto{margin:auto!important}.mt-xl-auto{margin-top:auto!important}.mr-xl-auto{margin-right:auto!important}.mb-xl-auto{margin-bottom:auto!important}.ml-xl-auto{margin-left:auto!important}.mx-xl-auto{margin-right:auto!important;margin-left:auto!important}.my-xl-auto{margin-top:auto!important;margin-bottom:auto!important}}.text-justify{text-align:justify!important}.text-nowrap{white-space:nowrap!important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left!important}.text-right{text-align:right!important}.text-center{text-align:center!important}@media (min-width:576px){.text-sm-left{text-align:left!important}.text-sm-right{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:768px){.text-md-left{text-align:left!important}.text-md-right{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:992px){.text-lg-left{text-align:left!important}.text-lg-right{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.text-xl-left{text-align:left!important}.text-xl-right{text-align:right!important}.text-xl-center{text-align:center!important}}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.font-weight-normal{font-weight:400}.font-weight-bold{font-weight:700}.font-italic{font-style:italic}.text-white{color:#fff!important}.text-muted{color:#636c72!important}a.text-muted:focus,a.text-muted:hover{color:#4b5257!important}.text-primary{color:#0275d8!important}a.text-primary:focus,a.text-primary:hover{color:#025aa5!important}.text-success{color:#5cb85c!important}a.text-success:focus,a.text-success:hover{color:#449d44!important}.text-info{color:#5bc0de!important}a.text-info:focus,a.text-info:hover{color:#31b0d5!important}.text-warning{color:#f0ad4e!important}a.text-warning:focus,a.text-warning:hover{color:#ec971f!important}.text-danger{color:#d9534f!important}a.text-danger:focus,a.text-danger:hover{color:#c9302c!important}.text-gray-dark{color:#292b2c!important}a.text-gray-dark:focus,a.text-gray-dark:hover{color:#101112!important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.invisible{visibility:hidden!important}.hidden-xs-up{display:none!important}@media (max-width:575px){.hidden-xs-down{display:none!important}}@media (min-width:576px){.hidden-sm-up{display:none!important}}@media (max-width:767px){.hidden-sm-down{display:none!important}}@media (min-width:768px){.hidden-md-up{display:none!important}}@media (max-width:991px){.hidden-md-down{display:none!important}}@media (min-width:992px){.hidden-lg-up{display:none!important}}@media (max-width:1199px){.hidden-lg-down{display:none!important}}@media (min-width:1200px){.hidden-xl-up{display:none!important}}.hidden-xl-down{display:none!important}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media 
print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file diff --git a/tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css.map b/tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css.map deleted file mode 100644 index 74462f2c..00000000 --- a/tasks/chupiflow_ui/static/bootstrap/css/bootstrap.min.css.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"sources":["../../scss/_normalize.scss","bootstrap.css","../../scss/_print.scss","../../scss/_reboot.scss","../../scss/_variables.scss","../../scss/mixins/_hover.scss","../../scss/_type.scss","../../scss/mixins/_lists.scss","../../scss/_images.scss","../../scss/mixins/_image.scss","../../scss/mixins/_border-radius.scss","../../scss/_mixins.scss","../../scss/_code.scss","../../scss/_grid.scss","../../scss/mixins/_grid.scss","../../scss/mixins/_breakpoints.scss","../../scss/mixins/_grid-framework.scss","../../scss/_tables.scss","../../scss/mixins/_table-row.scss","../../scss/_forms.scss","../../scss/mixins/_forms.scss","../../scss/_buttons.scss","../../scss/mixins/_buttons.scss","../../scss/_transitions.scss","../../scss/_dropdown.scss","../../scss/mixins/_nav-divider.scss","../../scss/_button-group.scss","../../scss/_input-group.scss","../../scss/_custom-forms.scss","../../scss/_nav.scss","../../scss/_navbar.scss","../../scss/_card.scss","../../scss/mixins/_cards.scss","../../scss/_breadcrumb.scss","../../scss/mixins/_clearfix.scss","../../scss/_pagination.scss","../../scss/mixins/_pagination.scss","../../scss/_badge.scss","../../scss/mixins/_badge.scss","../../scss/_jumbotron.scss","../../scss/_alert.scss","../../scss/mixins/_alert.scss","../../scss/_progress.scss","../../scss/mixins/_gradients.scss","../../scss/_media.scss","../../scss/_list-group.scss","../../scss/mixins/_list-group.scss","../../scss/_responsive-embed.scss","../../scss/_close.scss","../../scss/_modal.scss","../../scss/_tooltip.scss","../../scss/mixins/_reset-text.scss","../../scss/_popover.scss","../../scss/_carousel.scss","../../scss/mixins/_transforms.scss","../../scss/utilities/_align.scss","../../scss/utilities/_background.scss","../../scss/mixins/_background-variant.scss","../../scss/utilities/_borders.scss","../../scss/utilities/_display.scss","../../scss/utilities/_flex.scss","../../scss/utilities/_float.scss","../../scss/mixins/_float.scss","../../scss/utilities/_position.scss","../../scss/utilities/_screenreaders.scss","../../scss/mixins/_screen-reader.scss","../../scss/utilities/_sizing.scss","../../scss/utilities/_spacing.scss","../../scss/utilities/_text.scss","../../scss/mixins/_text-truncate.scss","../../scss/mixins/_text-emphasis.scss","../../scss/mixins/_text-hide.scss","../../scss/utilities/_visibility.scss","../../scss/mixins/_visibility.scss"],"names":[],"mappings":";;;;;4EAYA,KACE,YAAA,WACA,YAAA,KACA,qBAAA,KACA,yBAAA,KAUF,KACE,OAAA,EAOF,QAAA,MAAA,OAAA,OAAA,IAAA,QAME,QAAA,MAQF,GACE,UAAA,IACA,OAAA,MAAA,EAWF,WAAA,OAAA,KAGE,QAAA,MAOF,OACE,OAAA,IAAA,KAQF,GACE,mBAAA,YAAA,WAAA,YACA,OAAA,EACA,SAAA,QAQF,IACE,YAAA,UAAA,UACA,UAAA,IAWF,EACE,iBAAA,YACA,6BAAA,QAQF,SAAA,QAEE,cAAA,EAQF,YACE,cAAA,KACA,gBAAA,UACA,gBAAA,UAAA,OAOF,EAAA,OAEE,YAAA,QAOF,EAAA,OAEE,YAAA,OAQF,KAAA,IAAA,KAGE,YAAA,UAAA,UACA,UAAA,IAOF,IACE,WAAA,OAOF,KACE,iBAAA,KACA,MAAA,KAOF,MACE,UAAA,IAQF,IAAA,IAEE,UAAA,IACA,YAAA,EACA,SAAA,SACA,eAAA,SAGF,IACE,OAAA,OAGF,IACE,IAAA,MAUF,MAAA,MAEE,QAAA,aAOF,sBACE,QAAA,KACA,OAAA,EAOF,IACE,aAAA,KAOF,eACE,SAAA,OAWF,OAAA,MAAA,SAAA,OAAA,SAKE
AA,iBAAA,uBAAA,cAAA,mBAAA,cAAA,eAAA,cACF,gBAAE,mBAAA,mBAAA,sBAAA,iBAAA,uBAAA,iBAAA,mBAAA,iBAAA,eAAA,iBACF,qBAAE,mBAAA,qBAAA,sBAAA,kBAAA,uBAAA,sBAAA,mBAAA,sBAAA,eAAA,sBACF,wBAAE,mBAAA,mBAAA,sBAAA,kBAAA,uBAAA,yBAAA,mBAAA,yBAAA,eAAA,yBAEF,cAAE,kBAAA,eAAA,cAAA,eAAA,UAAA,eACF,gBAAE,kBAAA,iBAAA,cAAA,iBAAA,UAAA,iBACF,sBAAE,kBAAA,uBAAA,cAAA,uBAAA,UAAA,uBAEF,0BAAE,iBAAA,gBAAA,wBAAA,qBAAA,cAAA,gBAAA,gBAAA,qBACF,wBAAE,iBAAA,cAAA,wBAAA,mBAAA,cAAA,cAAA,gBAAA,mBACF,2BAAE,iBAAA,iBAAA,wBAAA,iBAAA,cAAA,iBAAA,gBAAA,iBACF,4BAAE,iBAAA,kBAAA,wBAAA,wBAAA,cAAA,kBAAA,gBAAA,wBACF,2BAAE,wBAAA,uBAAA,cAAA,qBAAA,gBAAA,uBAEF,sBAAE,kBAAA,gBAAA,oBAAA,qBAAA,eAAA,gBAAA,YAAA,qBACF,oBAAE,kBAAA,cAAA,oBAAA,mBAAA,eAAA,cAAA,YAAA,mBACF,uBAAE,kBAAA,iBAAA,oBAAA,iBAAA,eAAA,iBAAA,YAAA,iBACF,yBAAE,kBAAA,mBAAA,oBAAA,mBAAA,eAAA,mBAAA,YAAA,mBACF,wBAAE,kBAAA,kBAAA,oBAAA,kBAAA,eAAA,kBAAA,YAAA,kBAEF,wBAAE,sBAAA,qBAAA,mBAAA,gBAAA,cAAA,qBACF,sBAAE,sBAAA,mBAAA,mBAAA,cAAA,cAAA,mBACF,yBAAE,sBAAA,iBAAA,mBAAA,iBAAA,cAAA,iBACF,0BAAE,sBAAA,wBAAA,mBAAA,kBAAA,cAAA,wBACF,yBAAE,sBAAA,uBAAA,mBAAA,qBAAA,cAAA,uBACF,0BAAE,sBAAA,kBAAA,mBAAA,kBAAA,cAAA,kBAEF,oBAAE,mBAAA,eAAA,oBAAA,eAAA,mBAAA,eAAA,WAAA,eACF,qBAAE,mBAAA,qBAAA,oBAAA,gBAAA,WAAA,qBACF,mBAAE,mBAAA,mBAAA,oBAAA,cAAA,WAAA,mBACF,sBAAE,mBAAA,iBAAA,oBAAA,iBAAA,mBAAA,iBAAA,WAAA,iBACF,wBAAE,mBAAA,mBAAA,oBAAA,mBAAA,WAAA,mBACF,uBAAE,mBAAA,kBAAA,oBAAA,kBAAA,mBAAA,kBAAA,WAAA,mB7CWF,yB6ChDA,eAAE,0BAAA,EAAA,cAAA,GAAA,eAAA,GAAA,MAAA,GACF,cAAE,0BAAA,EAAA,cAAA,EAAA,eAAA,EAAA,MAAA,EACF,mBAAE,0BAAA,EAAA,cAAA,EAAA,eAAA,EAAA,MAAA,EAEF,aAAE,mBAAA,qBAAA,sBAAA,iBAAA,uBAAA,cAAA,mBAAA,cAAA,eAAA,cACF,gBAAE,mBAAA,mBAAA,sBAAA,iBAAA,uBAAA,iBAAA,mBAAA,iBAAA,eAAA,iBACF,qBAAE,mBAAA,qBAAA,sBAAA,kBAAA,uBAAA,sBAAA,mBAAA,sBAAA,eAAA,sBACF,wBAAE,mBAAA,mBAAA,sBAAA,kBAAA,uBAAA,yBAAA,mBAAA,yBAAA,eAAA,yBAEF,cAAE,kBAAA,eAAA,cAAA,eAAA,UAAA,eACF,gBAAE,kBAAA,iBAAA,cAAA,iBAAA,UAAA,iBACF,sBAAE,kBAAA,uBAAA,cAAA,uBAAA,UAAA,uBAEF,0BAAE,iBAAA,gBAAA,wBAAA,qBAAA,cAAA,gBAAA,gBAAA,qBACF,wBAAE,iBAAA,cAAA,wBAAA,mBAAA,cAAA,cAAA,gBAAA,mBACF,2BAAE,iBAAA,iBAAA,wBAAA,iBAAA,cAAA,iBAAA,gBAAA,iBACF,4BAAE,iBAAA,kBAAA,wBAAA,wBAAA,cAAA,kBAAA,gBAAA,wBACF,2BAAE,wBAAA,uBAAA,cAAA,qBAAA,gBAAA,uBAEF,sBAAE,kBAAA,gBAAA,oBAAA,qBAAA,eAAA,gBAAA,YAAA,qBACF,oBAAE,kBAAA,cAAA,oBAAA,mBAAA,eAAA,cAAA,YAAA,mBACF,uBAAE,kBAAA,iBAAA,oBAAA,iBAAA,eAAA,iBAAA,YAAA,iBACF,yBAAE,kBAAA,mBAAA,oBAAA,mBAAA,eAAA,mBAAA,YAAA,mBACF,wBAAE,kBAAA,kBAAA,oBAAA,kBAAA,eAAA,kBAAA,YAAA,kBAEF,wBAAE,sBAAA,qBAAA,mBAAA,gBAAA,cAAA,qBACF,sBAAE,sBAAA,mBAAA,mBAAA,cAAA,cAAA,mBACF,yBAAE,sBAAA,iBAAA,mBAAA,iBAAA,cAAA,iBACF,0BAAE,sBAAA,wBAAA,mBAAA,kBAAA,cAAA,wBACF,yBAAE,sBAAA,uBAAA,mBAAA,qBAAA,cAAA,uBACF,0BAAE,sBAAA,kBAAA,mBAAA,kBAAA,cAAA,kBAEF,oBAAE,mBAAA,eAAA,oBAAA,eAAA,mBAAA,eAAA,WAAA,eACF,qBAAE,mBAAA,qBAAA,oBAAA,gBAAA,WAAA,qBACF,mBAAE,mBAAA,mBAAA,oBAAA,cAAA,WAAA,mBACF,sBAAE,mBAAA,iBAAA,oBAAA,iBAAA,mBAAA,iBAAA,WAAA,iBACF,wBAAE,mBAAA,mBAAA,oBAAA,mBAAA,WAAA,mBACF,uBAAE,mBAAA,kBAAA,oBAAA,kBAAA,mBAAA,kBAAA,WAAA,mB7CWF,0B6ChDA,eAAE,0BAAA,EAAA,cAAA,GAAA,eAAA,GAAA,MAAA,GACF,cAAE,0BAAA,EAAA,cAAA,EAAA,eAAA,EAAA,MAAA,EACF,mBAAE,0BAAA,EAAA,cAAA,EAAA,eAAA,EAAA,MAAA,EAEF,aAAE,mBAAA,qBAAA,sBAAA,iBAAA,uBAAA,cAAA,mBAAA,cAAA,eAAA,cACF,gBAAE,mBAAA,mBAAA,sBAAA,iBAAA,uBAAA,iBAAA,mBAAA,iBAAA,eAAA,iBACF,qBAAE,mBAAA,qBAAA,sBAAA,kBAAA,uBAAA,sBAAA,mBAAA,sBAAA,eAAA,sBACF,wBAAE,mBAAA,mBAAA,sBAAA,kBAAA,uBAAA,yBAAA,mBAAA,yBAAA,eAAA,yBAEF,cAAE,kBAAA,eAAA,cAAA,eAAA,UAAA,eACF,gBAAE,kBAAA,iBAAA,cAAA,iBAAA,UAAA,iBACF,sBAAE,kBAAA,uBAAA,cAAA,uBAAA,UAAA,uBAEF,0BAAE,iBAAA,gBAAA,wBAAA,qBAAA,cAAA,gBAAA,gBAAA,qBACF,wBAAE,iBAAA,cAAA,wBAAA,mBAAA,cAAA,cAAA,gBAAA,mBA
CF,2BAAE,iBAAA,iBAAA,wBAAA,iBAAA,cAAA,iBAAA,gBAAA,iBACF,4BAAE,iBAAA,kBAAA,wBAAA,wBAAA,cAAA,kBAAA,gBAAA,wBACF,2BAAE,wBAAA,uBAAA,cAAA,qBAAA,gBAAA,uBAEF,sBAAE,kBAAA,gBAAA,oBAAA,qBAAA,eAAA,gBAAA,YAAA,qBACF,oBAAE,kBAAA,cAAA,oBAAA,mBAAA,eAAA,cAAA,YAAA,mBACF,uBAAE,kBAAA,iBAAA,oBAAA,iBAAA,eAAA,iBAAA,YAAA,iBACF,yBAAE,kBAAA,mBAAA,oBAAA,mBAAA,eAAA,mBAAA,YAAA,mBACF,wBAAE,kBAAA,kBAAA,oBAAA,kBAAA,eAAA,kBAAA,YAAA,kBAEF,wBAAE,sBAAA,qBAAA,mBAAA,gBAAA,cAAA,qBACF,sBAAE,sBAAA,mBAAA,mBAAA,cAAA,cAAA,mBACF,yBAAE,sBAAA,iBAAA,mBAAA,iBAAA,cAAA,iBACF,0BAAE,sBAAA,wBAAA,mBAAA,kBAAA,cAAA,wBACF,yBAAE,sBAAA,uBAAA,mBAAA,qBAAA,cAAA,uBACF,0BAAE,sBAAA,kBAAA,mBAAA,kBAAA,cAAA,kBAEF,oBAAE,mBAAA,eAAA,oBAAA,eAAA,mBAAA,eAAA,WAAA,eACF,qBAAE,mBAAA,qBAAA,oBAAA,gBAAA,WAAA,qBACF,mBAAE,mBAAA,mBAAA,oBAAA,cAAA,WAAA,mBACF,sBAAE,mBAAA,iBAAA,oBAAA,iBAAA,mBAAA,iBAAA,WAAA,iBACF,wBAAE,mBAAA,mBAAA,oBAAA,mBAAA,WAAA,mBACF,uBAAE,mBAAA,kBAAA,oBAAA,kBAAA,mBAAA,kBAAA,WAAA,mBCzCF,YCHF,MAAA,eDIE,aCDF,MAAA,gBDEE,YCCF,MAAA,e/CiDE,yB8CpDA,eCHF,MAAA,eDIE,gBCDF,MAAA,gBDEE,eCCF,MAAA,gB/CiDE,yB8CpDA,eCHF,MAAA,eDIE,gBCDF,MAAA,gBDEE,eCCF,MAAA,gB/CiDE,yB8CpDA,eCHF,MAAA,eDIE,gBCDF,MAAA,gBDEE,eCCF,MAAA,gB/CiDE,0B8CpDA,eCHF,MAAA,eDIE,gBCDF,MAAA,gBDEE,eCCF,MAAA,gBCLF,WACE,SAAA,MACA,IAAA,EACA,MAAA,EACA,KAAA,EACA,QAAA,KAGF,cACE,SAAA,MACA,MAAA,EACA,OAAA,EACA,KAAA,EACA,QAAA,KAGF,YACE,SAAA,eAAA,SAAA,OACA,IAAA,EACA,QAAA,KCjBF,SCCE,SAAA,SACA,MAAA,IACA,OAAA,IACA,QAAA,EACA,OAAA,KACA,SAAA,OACA,KAAA,cACA,OAAA,EAUA,0BAAA,yBAEE,SAAA,OACA,MAAA,KACA,OAAA,KACA,OAAA,EACA,SAAA,QACA,KAAA,KCzBA,MAAE,MAAA,cAAF,MAAE,MAAA,cAAF,MAAE,MAAA,cAAF,OAAE,MAAA,eAAF,MAAE,OAAA,cAAF,MAAE,OAAA,cAAF,MAAE,OAAA,cAAF,OAAE,OAAA,eAIN,QAAU,UAAA,eACV,QAAU,WAAA,eCEF,KAAE,OAAA,EAAA,YACF,MAAE,WAAA,YACF,MAAE,aAAA,YACF,MAAE,cAAA,YACF,MAAE,YAAA,YACF,MACE,aAAA,YACA,YAAA,YAEF,MACE,WAAA,YACA,cAAA,YAXF,KAAE,OAAA,OAAA,iBACF,MAAE,WAAA,iBACF,MAAE,aAAA,iBACF,MAAE,cAAA,iBACF,MAAE,YAAA,iBACF,MACE,aAAA,iBACA,YAAA,iBAEF,MACE,WAAA,iBACA,cAAA,iBAXF,KAAE,OAAA,MAAA,gBACF,MAAE,WAAA,gBACF,MAAE,aAAA,gBACF,MAAE,cAAA,gBACF,MAAE,YAAA,gBACF,MACE,aAAA,gBACA,YAAA,gBAEF,MACE,WAAA,gBACA,cAAA,gBAXF,KAAE,OAAA,KAAA,eACF,MAAE,WAAA,eACF,MAAE,aAAA,eACF,MAAE,cAAA,eACF,MAAE,YAAA,eACF,MACE,aAAA,eACA,YAAA,eAEF,MACE,WAAA,eACA,cAAA,eAXF,KAAE,OAAA,OAAA,iBACF,MAAE,WAAA,iBACF,MAAE,aAAA,iBACF,MAAE,cAAA,iBACF,MAAE,YAAA,iBACF,MACE,aAAA,iBACA,YAAA,iBAEF,MACE,WAAA,iBACA,cAAA,iBAXF,KAAE,OAAA,KAAA,eACF,MAAE,WAAA,eACF,MAAE,aAAA,eACF,MAAE,cAAA,eACF,MAAE,YAAA,eACF,MACE,aAAA,eACA,YAAA,eAEF,MACE,WAAA,eACA,cAAA,eAXF,KAAE,QAAA,EAAA,YACF,MAAE,YAAA,YACF,MAAE,cAAA,YACF,MAAE,eAAA,YACF,MAAE,aAAA,YACF,MACE,cAAA,YACA,aAAA,YAEF,MACE,YAAA,YACA,eAAA,YAXF,KAAE,QAAA,OAAA,iBACF,MAAE,YAAA,iBACF,MAAE,cAAA,iBACF,MAAE,eAAA,iBACF,MAAE,aAAA,iBACF,MACE,cAAA,iBACA,aAAA,iBAEF,MACE,YAAA,iBACA,eAAA,iBAXF,KAAE,QAAA,MAAA,gBACF,MAAE,YAAA,gBACF,MAAE,cAAA,gBACF,MAAE,eAAA,gBACF,MAAE,aAAA,gBACF,MACE,cAAA,gBACA,aAAA,gBAEF,MACE,YAAA,gBACA,eAAA,gBAXF,KAAE,QAAA,KAAA,eACF,MAAE,YAAA,eACF,MAAE,cAAA,eACF,MAAE,eAAA,eACF,MAAE,aAAA,eACF,MACE,cAAA,eACA,aAAA,eAEF,MACE,YAAA,eACA,eAAA,eAXF,KAAE,QAAA,OAAA,iBACF,MAAE,YAAA,iBACF,MAAE,cAAA,iBACF,MAAE,eAAA,iBACF,MAAE,aAAA,iBACF,MACE,cAAA,iBACA,aAAA,iBAEF,MACE,YAAA,iBACA,eAAA,iBAXF,KAAE,QAAA,KAAA,eACF,MAAE,YAAA,eACF,MAAE,cAAA,eACF,MAAE,eAAA,eACF,MAAE,aAAA,eACF,MACE,cAAA,eACA,aAAA,eAEF,MACE,YAAA,eACA,eAAA,eAMN,QAAE,OAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,epDiBF,yBoD7CI,QAAE,OAAA,EAAA,YACF,SAAE,WAAA,YACF,SAAE,aAAA,YACF,SAAE,cAAA,YACF,SAAE,YAAA,YACF,SACE,aAAA,YACA,YAAA,YAEF,SACE,WAAA,YACA,cAAA,YAXF,QAA
E,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,MAAA,gBACF,SAAE,WAAA,gBACF,SAAE,aAAA,gBACF,SAAE,cAAA,gBACF,SAAE,YAAA,gBACF,SACE,aAAA,gBACA,YAAA,gBAEF,SACE,WAAA,gBACA,cAAA,gBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,QAAA,EAAA,YACF,SAAE,YAAA,YACF,SAAE,cAAA,YACF,SAAE,eAAA,YACF,SAAE,aAAA,YACF,SACE,cAAA,YACA,aAAA,YAEF,SACE,YAAA,YACA,eAAA,YAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,MAAA,gBACF,SAAE,YAAA,gBACF,SAAE,cAAA,gBACF,SAAE,eAAA,gBACF,SAAE,aAAA,gBACF,SACE,cAAA,gBACA,aAAA,gBAEF,SACE,YAAA,gBACA,eAAA,gBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAMN,WAAE,OAAA,eACF,YAAE,WAAA,eACF,YAAE,aAAA,eACF,YAAE,cAAA,eACF,YAAE,YAAA,eACF,YACE,aAAA,eACA,YAAA,eAEF,YACE,WAAA,eACA,cAAA,gBpDiBF,yBoD7CI,QAAE,OAAA,EAAA,YACF,SAAE,WAAA,YACF,SAAE,aAAA,YACF,SAAE,cAAA,YACF,SAAE,YAAA,YACF,SACE,aAAA,YACA,YAAA,YAEF,SACE,WAAA,YACA,cAAA,YAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,MAAA,gBACF,SAAE,WAAA,gBACF,SAAE,aAAA,gBACF,SAAE,cAAA,gBACF,SAAE,YAAA,gBACF,SACE,aAAA,gBACA,YAAA,gBAEF,SACE,WAAA,gBACA,cAAA,gBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,QAAA,EAAA,YACF,SAAE,YAAA,YACF,SAAE,cAAA,YACF,SAAE,eAAA,YACF,SAAE,aAAA,YACF,SACE,cAAA,YACA,aAAA,YAEF,SACE,YAAA,YACA,eAAA,YAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,MAAA,gBACF,SAAE,YAAA,gBACF,SAAE,cAAA,gBACF,SAAE,eAAA,gBACF,SAAE,aAAA,gBACF,SACE,cAAA,gBACA,aAAA,gBAEF,SACE,YAAA,gBACA,eAAA,gBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAMN,WAAE,OAAA,eACF,YAAE,WAAA,eACF,YAAE,aAAA,eACF,YAAE,cAAA,eACF,YAAE,YAAA,eACF,YACE,aAAA,eACA,YAAA,eAEF,YACE,WAAA,eACA,cAAA,gBpDiBF,yBoD7CI,QAAE,OAAA,EAAA,YACF,SAAE,WAAA,YACF,SAAE,aAAA,YACF,SAAE,cAAA,YACF,SAAE,YAAA,YACF,SACE,aAAA,YACA,YAAA,YAEF,SACE,WAAA,YACA,cAAA,YAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,
cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,MAAA,gBACF,SAAE,WAAA,gBACF,SAAE,aAAA,gBACF,SAAE,cAAA,gBACF,SAAE,YAAA,gBACF,SACE,aAAA,gBACA,YAAA,gBAEF,SACE,WAAA,gBACA,cAAA,gBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,QAAA,EAAA,YACF,SAAE,YAAA,YACF,SAAE,cAAA,YACF,SAAE,eAAA,YACF,SAAE,aAAA,YACF,SACE,cAAA,YACA,aAAA,YAEF,SACE,YAAA,YACA,eAAA,YAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,MAAA,gBACF,SAAE,YAAA,gBACF,SAAE,cAAA,gBACF,SAAE,eAAA,gBACF,SAAE,aAAA,gBACF,SACE,cAAA,gBACA,aAAA,gBAEF,SACE,YAAA,gBACA,eAAA,gBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAMN,WAAE,OAAA,eACF,YAAE,WAAA,eACF,YAAE,aAAA,eACF,YAAE,cAAA,eACF,YAAE,YAAA,eACF,YACE,aAAA,eACA,YAAA,eAEF,YACE,WAAA,eACA,cAAA,gBpDiBF,0BoD7CI,QAAE,OAAA,EAAA,YACF,SAAE,WAAA,YACF,SAAE,aAAA,YACF,SAAE,cAAA,YACF,SAAE,YAAA,YACF,SACE,aAAA,YACA,YAAA,YAEF,SACE,WAAA,YACA,cAAA,YAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,MAAA,gBACF,SAAE,WAAA,gBACF,SAAE,aAAA,gBACF,SAAE,cAAA,gBACF,SAAE,YAAA,gBACF,SACE,aAAA,gBACA,YAAA,gBAEF,SACE,WAAA,gBACA,cAAA,gBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,OAAA,OAAA,iBACF,SAAE,WAAA,iBACF,SAAE,aAAA,iBACF,SAAE,cAAA,iBACF,SAAE,YAAA,iBACF,SACE,aAAA,iBACA,YAAA,iBAEF,SACE,WAAA,iBACA,cAAA,iBAXF,QAAE,OAAA,KAAA,eACF,SAAE,WAAA,eACF,SAAE,aAAA,eACF,SAAE,cAAA,eACF,SAAE,YAAA,eACF,SACE,aAAA,eACA,YAAA,eAEF,SACE,WAAA,eACA,cAAA,eAXF,QAAE,QAAA,EAAA,YACF,SAAE,YAAA,YACF,SAAE,cAAA,YACF,SAAE,eAAA,YACF,SAAE,aAAA,YACF,SACE,cAAA,YACA,aAAA,YAEF,SACE,YAAA,YACA,eAAA,YAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,MAAA,gBACF,SAAE,YAAA,gBACF,SAAE,cAAA,gBACF,SAAE,eAAA,gBACF,SAAE,aAAA,gBACF,SACE,cAAA,gBACA,aAAA,gBAEF,SACE,YAAA,gBACA,eAAA,gBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAXF,QAAE,QAAA,OAAA,iBACF,SAAE,YAAA,iBACF,SAAE,cAAA,iBACF,SAAE,eAAA,iBACF,SAAE,aAAA,iBACF,SACE,cAAA,iBACA,aAAA,iBAEF,SACE,YAAA,iBACA,eAAA,iBAXF,QAAE,QAAA,KAAA,eACF,SAAE,YAAA,eACF,SAAE,cAAA,eACF,SAAE,eAAA,eACF,SAAE,aAAA,eACF,SACE,cAAA,eACA,aAAA,eAEF,SACE,YAAA,eACA,eAAA,eAMN,WAAE,OAAA,eACF,YAAE,WAAA,eACF,YAAE,aAAA,eACF,YAAE,cAAA,eACF,YAAE,YAAA,eACF,YACE,aAAA,eACA,YAAA,eAEF,YACE,WAAA,eACA,cAAA,gBCjCN,cAAiB,WAAA,kBACjB,aAAiB,YAAA,iBACjB,eCJE,SAAA,OACA,cAAA,SACA,YAAA,ODUE,WAAE,WAAA,eACF,YAAE,WAAA,gBACF,aAAE,WAAA,iBrDsCF,yBqDxCA,cAAE,WAAA,eACF,eAAE,WAAA,gBACF,gBAAE,WAAA,kBrDsCF,yBqDxCA,cAAE,WAAA,eACF,eAAE,WAAA,gBACF,gBAAE,WAAA,kBrDsCF,yBqDxCA,cAAE,WAA
A,eACF,eAAE,WAAA,gBACF,gBAAE,WAAA,kBrDsCF,0BqDxCA,cAAE,WAAA,eACF,eAAE,WAAA,gBACF,gBAAE,WAAA,kBAMN,gBAAmB,eAAA,oBACnB,gBAAmB,eAAA,oBACnB,iBAAmB,eAAA,qBAInB,oBAAsB,YAAA,IACtB,kBAAsB,YAAA,IACtB,aAAsB,WAAA,OAItB,YACE,MAAA,eElCA,YACE,MAAA,kBjEgBA,mBAAA,mBiEZE,MAAA,kBALJ,cACE,MAAA,kBjEgBA,qBAAA,qBiEZE,MAAA,kBALJ,cACE,MAAA,kBjEgBA,qBAAA,qBiEZE,MAAA,kBALJ,WACE,MAAA,kBjEgBA,kBAAA,kBiEZE,MAAA,kBALJ,cACE,MAAA,kBjEgBA,qBAAA,qBiEZE,MAAA,kBALJ,aACE,MAAA,kBjEgBA,oBAAA,oBiEZE,MAAA,kBALJ,gBACE,MAAA,kBjEgBA,uBAAA,uBiEZE,MAAA,kBFkDN,WGxDE,KAAA,EAAA,EAAA,EACA,MAAA,YACA,YAAA,KACA,iBAAA,YACA,OAAA,ECFF,WCDE,WAAA,iBDQA,cAEI,QAAA,ezDwDF,yByDrDF,gBAEI,QAAA,gBzDsCF,yByD7CF,cAEI,QAAA,gBzDwDF,yByDrDF,gBAEI,QAAA,gBzDsCF,yByD7CF,cAEI,QAAA,gBzDwDF,yByDrDF,gBAEI,QAAA,gBzDsCF,yByD7CF,cAEI,QAAA,gBzDwDF,0ByDrDF,gBAEI,QAAA,gBzDsCF,0ByD7CF,cAEI,QAAA,gBAGJ,gBAEI,QAAA,eAUN,qBACE,QAAA,eAEA,aAHA,qBAIE,QAAA,iBAGJ,sBACE,QAAA,eAEA,aAHA,sBAIE,QAAA,kBAGJ,4BACE,QAAA,eAEA,aAHA,4BAIE,QAAA,wBAKF,aADA,cAEE,QAAA"} \ No newline at end of file diff --git a/tasks/chupiflow_ui/static/bootstrap/js/bootstrap.min.js b/tasks/chupiflow_ui/static/bootstrap/js/bootstrap.min.js deleted file mode 100644 index d9c72dfc..00000000 --- a/tasks/chupiflow_ui/static/bootstrap/js/bootstrap.min.js +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Bootstrap v4.0.0-alpha.6 (https://getbootstrap.com) - * Copyright 2011-2017 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */ -if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery. jQuery must be included before Bootstrap's JavaScript.");+function(t){var e=t.fn.jquery.split(" ")[0].split(".");if(e[0]<2&&e[1]<9||1==e[0]&&9==e[1]&&e[2]<1||e[0]>=4)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}(jQuery),+function(){function t(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e}function e(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not "+typeof e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)}function n(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}var i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},o=function(){function t(t,e){for(var n=0;nthis._items.length-1||e<0)){if(this._isSliding)return void t(this._element).one(m.SLID,function(){return n.to(e)});if(i===e)return this.pause(),void this.cycle();var o=e>i?p.NEXT:p.PREVIOUS;this._slide(o,this._items[e])}},h.prototype.dispose=function(){t(this._element).off(l),t.removeData(this._element,a),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},h.prototype._getConfig=function(n){return n=t.extend({},_,n),r.typeCheckConfig(e,n,g),n},h.prototype._addEventListeners=function(){var e=this;this._config.keyboard&&t(this._element).on(m.KEYDOWN,function(t){return e._keydown(t)}),"hover"!==this._config.pause||"ontouchstart"in document.documentElement||t(this._element).on(m.MOUSEENTER,function(t){return 
e.pause(t)}).on(m.MOUSELEAVE,function(t){return e.cycle(t)})},h.prototype._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case d:t.preventDefault(),this.prev();break;case f:t.preventDefault(),this.next();break;default:return}},h.prototype._getItemIndex=function(e){return this._items=t.makeArray(t(e).parent().find(v.ITEM)),this._items.indexOf(e)},h.prototype._getItemByDirection=function(t,e){var n=t===p.NEXT,i=t===p.PREVIOUS,o=this._getItemIndex(e),r=this._items.length-1,s=i&&0===o||n&&o===r;if(s&&!this._config.wrap)return e;var a=t===p.PREVIOUS?-1:1,l=(o+a)%this._items.length;return l===-1?this._items[this._items.length-1]:this._items[l]},h.prototype._triggerSlideEvent=function(e,n){var i=t.Event(m.SLIDE,{relatedTarget:e,direction:n});return t(this._element).trigger(i),i},h.prototype._setActiveIndicatorElement=function(e){if(this._indicatorsElement){t(this._indicatorsElement).find(v.ACTIVE).removeClass(E.ACTIVE);var n=this._indicatorsElement.children[this._getItemIndex(e)];n&&t(n).addClass(E.ACTIVE)}},h.prototype._slide=function(e,n){var i=this,o=t(this._element).find(v.ACTIVE_ITEM)[0],s=n||o&&this._getItemByDirection(e,o),a=Boolean(this._interval),l=void 0,h=void 0,c=void 0;if(e===p.NEXT?(l=E.LEFT,h=E.NEXT,c=p.LEFT):(l=E.RIGHT,h=E.PREV,c=p.RIGHT),s&&t(s).hasClass(E.ACTIVE))return void(this._isSliding=!1);var d=this._triggerSlideEvent(s,c);if(!d.isDefaultPrevented()&&o&&s){this._isSliding=!0,a&&this.pause(),this._setActiveIndicatorElement(s);var f=t.Event(m.SLID,{relatedTarget:s,direction:c});r.supportsTransitionEnd()&&t(this._element).hasClass(E.SLIDE)?(t(s).addClass(h),r.reflow(s),t(o).addClass(l),t(s).addClass(l),t(o).one(r.TRANSITION_END,function(){t(s).removeClass(l+" "+h).addClass(E.ACTIVE),t(o).removeClass(E.ACTIVE+" "+h+" "+l),i._isSliding=!1,setTimeout(function(){return t(i._element).trigger(f)},0)}).emulateTransitionEnd(u)):(t(o).removeClass(E.ACTIVE),t(s).addClass(E.ACTIVE),this._isSliding=!1,t(this._element).trigger(f)),a&&this.cycle()}},h._jQueryInterface=function(e){return this.each(function(){var n=t(this).data(a),o=t.extend({},_,t(this).data());"object"===("undefined"==typeof e?"undefined":i(e))&&t.extend(o,e);var r="string"==typeof e?e:o.slide;if(n||(n=new h(this,o),t(this).data(a,n)),"number"==typeof e)n.to(e);else if("string"==typeof r){if(void 0===n[r])throw new Error('No method named "'+r+'"');n[r]()}else o.interval&&(n.pause(),n.cycle())})},h._dataApiClickHandler=function(e){var n=r.getSelectorFromElement(this);if(n){var i=t(n)[0];if(i&&t(i).hasClass(E.CAROUSEL)){var o=t.extend({},t(i).data(),t(this).data()),s=this.getAttribute("data-slide-to");s&&(o.interval=!1),h._jQueryInterface.call(t(i),o),s&&t(i).data(a).to(s),e.preventDefault()}}},o(h,null,[{key:"VERSION",get:function(){return s}},{key:"Default",get:function(){return _}}]),h}();return t(document).on(m.CLICK_DATA_API,v.DATA_SLIDE,T._dataApiClickHandler),t(window).on(m.LOAD_DATA_API,function(){t(v.DATA_RIDE).each(function(){var e=t(this);T._jQueryInterface.call(e,e.data())})}),t.fn[e]=T._jQueryInterface,t.fn[e].Constructor=T,t.fn[e].noConflict=function(){return t.fn[e]=c,T._jQueryInterface},T}(jQuery),function(t){var 
e="collapse",s="4.0.0-alpha.6",a="bs.collapse",l="."+a,h=".data-api",c=t.fn[e],u=600,d={toggle:!0,parent:""},f={toggle:"boolean",parent:"string"},_={SHOW:"show"+l,SHOWN:"shown"+l,HIDE:"hide"+l,HIDDEN:"hidden"+l,CLICK_DATA_API:"click"+l+h},g={SHOW:"show",COLLAPSE:"collapse",COLLAPSING:"collapsing",COLLAPSED:"collapsed"},p={WIDTH:"width",HEIGHT:"height"},m={ACTIVES:".card > .show, .card > .collapsing",DATA_TOGGLE:'[data-toggle="collapse"]'},E=function(){function l(e,i){n(this,l),this._isTransitioning=!1,this._element=e,this._config=this._getConfig(i),this._triggerArray=t.makeArray(t('[data-toggle="collapse"][href="#'+e.id+'"],'+('[data-toggle="collapse"][data-target="#'+e.id+'"]'))),this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}return l.prototype.toggle=function(){t(this._element).hasClass(g.SHOW)?this.hide():this.show()},l.prototype.show=function(){var e=this;if(this._isTransitioning)throw new Error("Collapse is transitioning");if(!t(this._element).hasClass(g.SHOW)){var n=void 0,i=void 0;if(this._parent&&(n=t.makeArray(t(this._parent).find(m.ACTIVES)),n.length||(n=null)),!(n&&(i=t(n).data(a),i&&i._isTransitioning))){var o=t.Event(_.SHOW);if(t(this._element).trigger(o),!o.isDefaultPrevented()){n&&(l._jQueryInterface.call(t(n),"hide"),i||t(n).data(a,null));var s=this._getDimension();t(this._element).removeClass(g.COLLAPSE).addClass(g.COLLAPSING),this._element.style[s]=0,this._element.setAttribute("aria-expanded",!0),this._triggerArray.length&&t(this._triggerArray).removeClass(g.COLLAPSED).attr("aria-expanded",!0),this.setTransitioning(!0);var h=function(){t(e._element).removeClass(g.COLLAPSING).addClass(g.COLLAPSE).addClass(g.SHOW),e._element.style[s]="",e.setTransitioning(!1),t(e._element).trigger(_.SHOWN)};if(!r.supportsTransitionEnd())return void h();var c=s[0].toUpperCase()+s.slice(1),d="scroll"+c;t(this._element).one(r.TRANSITION_END,h).emulateTransitionEnd(u),this._element.style[s]=this._element[d]+"px"}}}},l.prototype.hide=function(){var e=this;if(this._isTransitioning)throw new Error("Collapse is transitioning");if(t(this._element).hasClass(g.SHOW)){var n=t.Event(_.HIDE);if(t(this._element).trigger(n),!n.isDefaultPrevented()){var i=this._getDimension(),o=i===p.WIDTH?"offsetWidth":"offsetHeight";this._element.style[i]=this._element[o]+"px",r.reflow(this._element),t(this._element).addClass(g.COLLAPSING).removeClass(g.COLLAPSE).removeClass(g.SHOW),this._element.setAttribute("aria-expanded",!1),this._triggerArray.length&&t(this._triggerArray).addClass(g.COLLAPSED).attr("aria-expanded",!1),this.setTransitioning(!0);var s=function(){e.setTransitioning(!1),t(e._element).removeClass(g.COLLAPSING).addClass(g.COLLAPSE).trigger(_.HIDDEN)};return this._element.style[i]="",r.supportsTransitionEnd()?void t(this._element).one(r.TRANSITION_END,s).emulateTransitionEnd(u):void s()}}},l.prototype.setTransitioning=function(t){this._isTransitioning=t},l.prototype.dispose=function(){t.removeData(this._element,a),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},l.prototype._getConfig=function(n){return n=t.extend({},d,n),n.toggle=Boolean(n.toggle),r.typeCheckConfig(e,n,f),n},l.prototype._getDimension=function(){var e=t(this._element).hasClass(p.WIDTH);return e?p.WIDTH:p.HEIGHT},l.prototype._getParent=function(){var 
e=this,n=t(this._config.parent)[0],i='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]';return t(n).find(i).each(function(t,n){e._addAriaAndCollapsedClass(l._getTargetFromElement(n),[n])}),n},l.prototype._addAriaAndCollapsedClass=function(e,n){if(e){var i=t(e).hasClass(g.SHOW);e.setAttribute("aria-expanded",i),n.length&&t(n).toggleClass(g.COLLAPSED,!i).attr("aria-expanded",i)}},l._getTargetFromElement=function(e){var n=r.getSelectorFromElement(e);return n?t(n)[0]:null},l._jQueryInterface=function(e){return this.each(function(){var n=t(this),o=n.data(a),r=t.extend({},d,n.data(),"object"===("undefined"==typeof e?"undefined":i(e))&&e);if(!o&&r.toggle&&/show|hide/.test(e)&&(r.toggle=!1),o||(o=new l(this,r),n.data(a,o)),"string"==typeof e){if(void 0===o[e])throw new Error('No method named "'+e+'"');o[e]()}})},o(l,null,[{key:"VERSION",get:function(){return s}},{key:"Default",get:function(){return d}}]),l}();return t(document).on(_.CLICK_DATA_API,m.DATA_TOGGLE,function(e){e.preventDefault();var n=E._getTargetFromElement(this),i=t(n).data(a),o=i?"toggle":t(this).data();E._jQueryInterface.call(t(n),o)}),t.fn[e]=E._jQueryInterface,t.fn[e].Constructor=E,t.fn[e].noConflict=function(){return t.fn[e]=c,E._jQueryInterface},E}(jQuery),function(t){var e="dropdown",i="4.0.0-alpha.6",s="bs.dropdown",a="."+s,l=".data-api",h=t.fn[e],c=27,u=38,d=40,f=3,_={HIDE:"hide"+a,HIDDEN:"hidden"+a,SHOW:"show"+a,SHOWN:"shown"+a,CLICK:"click"+a,CLICK_DATA_API:"click"+a+l,FOCUSIN_DATA_API:"focusin"+a+l,KEYDOWN_DATA_API:"keydown"+a+l},g={BACKDROP:"dropdown-backdrop",DISABLED:"disabled",SHOW:"show"},p={BACKDROP:".dropdown-backdrop",DATA_TOGGLE:'[data-toggle="dropdown"]',FORM_CHILD:".dropdown form",ROLE_MENU:'[role="menu"]',ROLE_LISTBOX:'[role="listbox"]',NAVBAR_NAV:".navbar-nav",VISIBLE_ITEMS:'[role="menu"] li:not(.disabled) a, [role="listbox"] li:not(.disabled) a'},m=function(){function e(t){n(this,e),this._element=t,this._addEventListeners()}return e.prototype.toggle=function(){if(this.disabled||t(this).hasClass(g.DISABLED))return!1;var n=e._getParentFromElement(this),i=t(n).hasClass(g.SHOW);if(e._clearMenus(),i)return!1;if("ontouchstart"in document.documentElement&&!t(n).closest(p.NAVBAR_NAV).length){var o=document.createElement("div");o.className=g.BACKDROP,t(o).insertBefore(this),t(o).on("click",e._clearMenus)}var r={relatedTarget:this},s=t.Event(_.SHOW,r);return t(n).trigger(s),!s.isDefaultPrevented()&&(this.focus(),this.setAttribute("aria-expanded",!0),t(n).toggleClass(g.SHOW),t(n).trigger(t.Event(_.SHOWN,r)),!1)},e.prototype.dispose=function(){t.removeData(this._element,s),t(this._element).off(a),this._element=null},e.prototype._addEventListeners=function(){t(this._element).on(_.CLICK,this.toggle)},e._jQueryInterface=function(n){return this.each(function(){var i=t(this).data(s);if(i||(i=new e(this),t(this).data(s,i)),"string"==typeof n){if(void 0===i[n])throw new Error('No method named "'+n+'"');i[n].call(this)}})},e._clearMenus=function(n){if(!n||n.which!==f){var i=t(p.BACKDROP)[0];i&&i.parentNode.removeChild(i);for(var o=t.makeArray(t(p.DATA_TOGGLE)),r=0;r0&&a--,n.which===d&&adocument.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},h.prototype._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},h.prototype._checkScrollbar=function(){this._isBodyOverflowing=document.body.clientWidth=n){var 
i=this._targets[this._targets.length-1];return void(this._activeTarget!==i&&this._activate(i))}if(this._activeTarget&&t0)return this._activeTarget=null,void this._clear();for(var o=this._offsets.length;o--;){var r=this._activeTarget!==this._targets[o]&&t>=this._offsets[o]&&(void 0===this._offsets[o+1]||t "+g.NAV_LINKS).addClass(_.ACTIVE),t(this._scrollElement).trigger(f.ACTIVATE,{relatedTarget:e})},h.prototype._clear=function(){t(this._selector).filter(g.ACTIVE).removeClass(_.ACTIVE)},h._jQueryInterface=function(e){return this.each(function(){var n=t(this).data(a),o="object"===("undefined"==typeof e?"undefined":i(e))&&e; -if(n||(n=new h(this,o),t(this).data(a,n)),"string"==typeof e){if(void 0===n[e])throw new Error('No method named "'+e+'"');n[e]()}})},o(h,null,[{key:"VERSION",get:function(){return s}},{key:"Default",get:function(){return u}}]),h}();return t(window).on(f.LOAD_DATA_API,function(){for(var e=t.makeArray(t(g.DATA_SPY)),n=e.length;n--;){var i=t(e[n]);m._jQueryInterface.call(i,i.data())}}),t.fn[e]=m._jQueryInterface,t.fn[e].Constructor=m,t.fn[e].noConflict=function(){return t.fn[e]=c,m._jQueryInterface},m}(jQuery),function(t){var e="tab",i="4.0.0-alpha.6",s="bs.tab",a="."+s,l=".data-api",h=t.fn[e],c=150,u={HIDE:"hide"+a,HIDDEN:"hidden"+a,SHOW:"show"+a,SHOWN:"shown"+a,CLICK_DATA_API:"click"+a+l},d={DROPDOWN_MENU:"dropdown-menu",ACTIVE:"active",DISABLED:"disabled",FADE:"fade",SHOW:"show"},f={A:"a",LI:"li",DROPDOWN:".dropdown",LIST:"ul:not(.dropdown-menu), ol:not(.dropdown-menu), nav:not(.dropdown-menu)",FADE_CHILD:"> .nav-item .fade, > .fade",ACTIVE:".active",ACTIVE_CHILD:"> .nav-item > .active, > .active",DATA_TOGGLE:'[data-toggle="tab"], [data-toggle="pill"]',DROPDOWN_TOGGLE:".dropdown-toggle",DROPDOWN_ACTIVE_CHILD:"> .dropdown-menu .active"},_=function(){function e(t){n(this,e),this._element=t}return e.prototype.show=function(){var e=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&t(this._element).hasClass(d.ACTIVE)||t(this._element).hasClass(d.DISABLED))){var n=void 0,i=void 0,o=t(this._element).closest(f.LIST)[0],s=r.getSelectorFromElement(this._element);o&&(i=t.makeArray(t(o).find(f.ACTIVE)),i=i[i.length-1]);var a=t.Event(u.HIDE,{relatedTarget:this._element}),l=t.Event(u.SHOW,{relatedTarget:i});if(i&&t(i).trigger(a),t(this._element).trigger(l),!l.isDefaultPrevented()&&!a.isDefaultPrevented()){s&&(n=t(s)[0]),this._activate(this._element,o);var h=function(){var n=t.Event(u.HIDDEN,{relatedTarget:e._element}),o=t.Event(u.SHOWN,{relatedTarget:i});t(i).trigger(n),t(e._element).trigger(o)};n?this._activate(n,n.parentNode,h):h()}}},e.prototype.dispose=function(){t.removeClass(this._element,s),this._element=null},e.prototype._activate=function(e,n,i){var o=this,s=t(n).find(f.ACTIVE_CHILD)[0],a=i&&r.supportsTransitionEnd()&&(s&&t(s).hasClass(d.FADE)||Boolean(t(n).find(f.FADE_CHILD)[0])),l=function(){return o._transitionComplete(e,s,a,i)};s&&a?t(s).one(r.TRANSITION_END,l).emulateTransitionEnd(c):l(),s&&t(s).removeClass(d.SHOW)},e.prototype._transitionComplete=function(e,n,i,o){if(n){t(n).removeClass(d.ACTIVE);var s=t(n.parentNode).find(f.DROPDOWN_ACTIVE_CHILD)[0];s&&t(s).removeClass(d.ACTIVE),n.setAttribute("aria-expanded",!1)}if(t(e).addClass(d.ACTIVE),e.setAttribute("aria-expanded",!0),i?(r.reflow(e),t(e).addClass(d.SHOW)):t(e).removeClass(d.FADE),e.parentNode&&t(e.parentNode).hasClass(d.DROPDOWN_MENU)){var 
a=t(e).closest(f.DROPDOWN)[0];a&&t(a).find(f.DROPDOWN_TOGGLE).addClass(d.ACTIVE),e.setAttribute("aria-expanded",!0)}o&&o()},e._jQueryInterface=function(n){return this.each(function(){var i=t(this),o=i.data(s);if(o||(o=new e(this),i.data(s,o)),"string"==typeof n){if(void 0===o[n])throw new Error('No method named "'+n+'"');o[n]()}})},o(e,null,[{key:"VERSION",get:function(){return i}}]),e}();return t(document).on(u.CLICK_DATA_API,f.DATA_TOGGLE,function(e){e.preventDefault(),_._jQueryInterface.call(t(this),"show")}),t.fn[e]=_._jQueryInterface,t.fn[e].Constructor=_,t.fn[e].noConflict=function(){return t.fn[e]=h,_._jQueryInterface},_}(jQuery),function(t){if("undefined"==typeof Tether)throw new Error("Bootstrap tooltips require Tether (http://tether.io/)");var e="tooltip",s="4.0.0-alpha.6",a="bs.tooltip",l="."+a,h=t.fn[e],c=150,u="bs-tether",d={animation:!0,template:'',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:"0 0",constraints:[],container:!1},f={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"string",constraints:"array",container:"(string|element|boolean)"},_={TOP:"bottom center",RIGHT:"middle left",BOTTOM:"top center",LEFT:"middle right"},g={SHOW:"show",OUT:"out"},p={HIDE:"hide"+l,HIDDEN:"hidden"+l,SHOW:"show"+l,SHOWN:"shown"+l,INSERTED:"inserted"+l,CLICK:"click"+l,FOCUSIN:"focusin"+l,FOCUSOUT:"focusout"+l,MOUSEENTER:"mouseenter"+l,MOUSELEAVE:"mouseleave"+l},m={FADE:"fade",SHOW:"show"},E={TOOLTIP:".tooltip",TOOLTIP_INNER:".tooltip-inner"},v={element:!1,enabled:!1},T={HOVER:"hover",FOCUS:"focus",CLICK:"click",MANUAL:"manual"},I=function(){function h(t,e){n(this,h),this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._isTransitioning=!1,this._tether=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}return h.prototype.enable=function(){this._isEnabled=!0},h.prototype.disable=function(){this._isEnabled=!1},h.prototype.toggleEnabled=function(){this._isEnabled=!this._isEnabled},h.prototype.toggle=function(e){if(e){var n=this.constructor.DATA_KEY,i=t(e.currentTarget).data(n);i||(i=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(n,i)),i._activeTrigger.click=!i._activeTrigger.click,i._isWithActiveTrigger()?i._enter(null,i):i._leave(null,i)}else{if(t(this.getTipElement()).hasClass(m.SHOW))return void this._leave(null,this);this._enter(null,this)}},h.prototype.dispose=function(){clearTimeout(this._timeout),this.cleanupTether(),t.removeData(this.element,this.constructor.DATA_KEY),t(this.element).off(this.constructor.EVENT_KEY),t(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&t(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._tether=null,this.element=null,this.config=null,this.tip=null},h.prototype.show=function(){var e=this;if("none"===t(this.element).css("display"))throw new Error("Please use show on visible elements");var n=t.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){if(this._isTransitioning)throw new Error("Tooltip is transitioning");t(this.element).trigger(n);var i=t.contains(this.element.ownerDocument.documentElement,this.element);if(n.isDefaultPrevented()||!i)return;var 
o=this.getTipElement(),s=r.getUID(this.constructor.NAME);o.setAttribute("id",s),this.element.setAttribute("aria-describedby",s),this.setContent(),this.config.animation&&t(o).addClass(m.FADE);var a="function"==typeof this.config.placement?this.config.placement.call(this,o,this.element):this.config.placement,l=this._getAttachment(a),c=this.config.container===!1?document.body:t(this.config.container);t(o).data(this.constructor.DATA_KEY,this).appendTo(c),t(this.element).trigger(this.constructor.Event.INSERTED),this._tether=new Tether({attachment:l,element:o,target:this.element,classes:v,classPrefix:u,offset:this.config.offset,constraints:this.config.constraints,addTargetClasses:!1}),r.reflow(o),this._tether.position(),t(o).addClass(m.SHOW);var d=function(){var n=e._hoverState;e._hoverState=null,e._isTransitioning=!1,t(e.element).trigger(e.constructor.Event.SHOWN),n===g.OUT&&e._leave(null,e)};if(r.supportsTransitionEnd()&&t(this.tip).hasClass(m.FADE))return this._isTransitioning=!0,void t(this.tip).one(r.TRANSITION_END,d).emulateTransitionEnd(h._TRANSITION_DURATION);d()}},h.prototype.hide=function(e){var n=this,i=this.getTipElement(),o=t.Event(this.constructor.Event.HIDE);if(this._isTransitioning)throw new Error("Tooltip is transitioning");var s=function(){n._hoverState!==g.SHOW&&i.parentNode&&i.parentNode.removeChild(i),n.element.removeAttribute("aria-describedby"),t(n.element).trigger(n.constructor.Event.HIDDEN),n._isTransitioning=!1,n.cleanupTether(),e&&e()};t(this.element).trigger(o),o.isDefaultPrevented()||(t(i).removeClass(m.SHOW),this._activeTrigger[T.CLICK]=!1,this._activeTrigger[T.FOCUS]=!1,this._activeTrigger[T.HOVER]=!1,r.supportsTransitionEnd()&&t(this.tip).hasClass(m.FADE)?(this._isTransitioning=!0,t(i).one(r.TRANSITION_END,s).emulateTransitionEnd(c)):s(),this._hoverState="")},h.prototype.isWithContent=function(){return Boolean(this.getTitle())},h.prototype.getTipElement=function(){return this.tip=this.tip||t(this.config.template)[0]},h.prototype.setContent=function(){var e=t(this.getTipElement());this.setElementContent(e.find(E.TOOLTIP_INNER),this.getTitle()),e.removeClass(m.FADE+" "+m.SHOW),this.cleanupTether()},h.prototype.setElementContent=function(e,n){var o=this.config.html;"object"===("undefined"==typeof n?"undefined":i(n))&&(n.nodeType||n.jquery)?o?t(n).parent().is(e)||e.empty().append(n):e.text(t(n).text()):e[o?"html":"text"](n)},h.prototype.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},h.prototype.cleanupTether=function(){this._tether&&this._tether.destroy()},h.prototype._getAttachment=function(t){return _[t.toUpperCase()]},h.prototype._setListeners=function(){var e=this,n=this.config.trigger.split(" ");n.forEach(function(n){if("click"===n)t(e.element).on(e.constructor.Event.CLICK,e.config.selector,function(t){return e.toggle(t)});else if(n!==T.MANUAL){var i=n===T.HOVER?e.constructor.Event.MOUSEENTER:e.constructor.Event.FOCUSIN,o=n===T.HOVER?e.constructor.Event.MOUSELEAVE:e.constructor.Event.FOCUSOUT;t(e.element).on(i,e.config.selector,function(t){return e._enter(t)}).on(o,e.config.selector,function(t){return e._leave(t)})}t(e.element).closest(".modal").on("hide.bs.modal",function(){return e.hide()})}),this.config.selector?this.config=t.extend({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},h.prototype._fixTitle=function(){var 
t=i(this.element.getAttribute("data-original-title"));(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},h.prototype._enter=function(e,n){var i=this.constructor.DATA_KEY;return n=n||t(e.currentTarget).data(i),n||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(i,n)),e&&(n._activeTrigger["focusin"===e.type?T.FOCUS:T.HOVER]=!0),t(n.getTipElement()).hasClass(m.SHOW)||n._hoverState===g.SHOW?void(n._hoverState=g.SHOW):(clearTimeout(n._timeout),n._hoverState=g.SHOW,n.config.delay&&n.config.delay.show?void(n._timeout=setTimeout(function(){n._hoverState===g.SHOW&&n.show()},n.config.delay.show)):void n.show())},h.prototype._leave=function(e,n){var i=this.constructor.DATA_KEY;if(n=n||t(e.currentTarget).data(i),n||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(i,n)),e&&(n._activeTrigger["focusout"===e.type?T.FOCUS:T.HOVER]=!1),!n._isWithActiveTrigger())return clearTimeout(n._timeout),n._hoverState=g.OUT,n.config.delay&&n.config.delay.hide?void(n._timeout=setTimeout(function(){n._hoverState===g.OUT&&n.hide()},n.config.delay.hide)):void n.hide()},h.prototype._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},h.prototype._getConfig=function(n){return n=t.extend({},this.constructor.Default,t(this.element).data(),n),n.delay&&"number"==typeof n.delay&&(n.delay={show:n.delay,hide:n.delay}),r.typeCheckConfig(e,n,this.constructor.DefaultType),n},h.prototype._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},h._jQueryInterface=function(e){return this.each(function(){var n=t(this).data(a),o="object"===("undefined"==typeof e?"undefined":i(e))&&e;if((n||!/dispose|hide/.test(e))&&(n||(n=new h(this,o),t(this).data(a,n)),"string"==typeof e)){if(void 0===n[e])throw new Error('No method named "'+e+'"');n[e]()}})},o(h,null,[{key:"VERSION",get:function(){return s}},{key:"Default",get:function(){return d}},{key:"NAME",get:function(){return e}},{key:"DATA_KEY",get:function(){return a}},{key:"Event",get:function(){return p}},{key:"EVENT_KEY",get:function(){return l}},{key:"DefaultType",get:function(){return f}}]),h}();return t.fn[e]=I._jQueryInterface,t.fn[e].Constructor=I,t.fn[e].noConflict=function(){return t.fn[e]=h,I._jQueryInterface},I}(jQuery));(function(r){var a="popover",l="4.0.0-alpha.6",h="bs.popover",c="."+h,u=r.fn[a],d=r.extend({},s.Default,{placement:"right",trigger:"click",content:"",template:''}),f=r.extend({},s.DefaultType,{content:"(string|element|function)"}),_={FADE:"fade",SHOW:"show"},g={TITLE:".popover-title",CONTENT:".popover-content"},p={HIDE:"hide"+c,HIDDEN:"hidden"+c,SHOW:"show"+c,SHOWN:"shown"+c,INSERTED:"inserted"+c,CLICK:"click"+c,FOCUSIN:"focusin"+c,FOCUSOUT:"focusout"+c,MOUSEENTER:"mouseenter"+c,MOUSELEAVE:"mouseleave"+c},m=function(s){function u(){return n(this,u),t(this,s.apply(this,arguments))}return e(u,s),u.prototype.isWithContent=function(){return this.getTitle()||this._getContent()},u.prototype.getTipElement=function(){return this.tip=this.tip||r(this.config.template)[0]},u.prototype.setContent=function(){var t=r(this.getTipElement());this.setElementContent(t.find(g.TITLE),this.getTitle()),this.setElementContent(t.find(g.CONTENT),this._getContent()),t.removeClass(_.FADE+" 
"+_.SHOW),this.cleanupTether()},u.prototype._getContent=function(){return this.element.getAttribute("data-content")||("function"==typeof this.config.content?this.config.content.call(this.element):this.config.content)},u._jQueryInterface=function(t){return this.each(function(){var e=r(this).data(h),n="object"===("undefined"==typeof t?"undefined":i(t))?t:null;if((e||!/destroy|hide/.test(t))&&(e||(e=new u(this,n),r(this).data(h,e)),"string"==typeof t)){if(void 0===e[t])throw new Error('No method named "'+t+'"');e[t]()}})},o(u,null,[{key:"VERSION",get:function(){return l}},{key:"Default",get:function(){return d}},{key:"NAME",get:function(){return a}},{key:"DATA_KEY",get:function(){return h}},{key:"Event",get:function(){return p}},{key:"EVENT_KEY",get:function(){return c}},{key:"DefaultType",get:function(){return f}}]),u}(s);return r.fn[a]=m._jQueryInterface,r.fn[a].Constructor=m,r.fn[a].noConflict=function(){return r.fn[a]=u,m._jQueryInterface},m})(jQuery)}(); \ No newline at end of file diff --git a/tasks/chupiflow_ui/static/favicon.ico b/tasks/chupiflow_ui/static/favicon.ico deleted file mode 100644 index f36a475bfe050ea0ae924e2c1d87205b1283aabc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3096 zcmV+z4CnKSP)ga3}@v6ets_3CXu0G9IOD(CB+JXSp`6w67IAAU3%0f2wV`TB)CUd!V~o)^No z+wC^#Ro)^7IPe2hq-TXTF#xhaAV0oSJiTXy7BT?%084{Q>@pEV(gUD*0Py92>YmHxtSZ3Dod zCGWAZc;vS2tWCYY zkpRH34`TnziYPxD8ALq;)Tj8p(!-h_A!=j#_|$Qw!%*){4ZZz;QHE6hMzl~)9asFd z`2To+)snW6`ke&;QvJdK#b>F1*8{-5;zLw?;86SAwax246wd+0Lr-SjKJ@rGakWoOynZA)D4yfW-=X5~H1DrENo3si zgiqyRAcOKZ=#bqRUZYIzx~hC@CLojG(e3q9rp28Q5*B3`A=@_<9hQLwpFR3 zu9dy$)D;cnM55y|U{?M<$)gkMtx);g8Mr)ppLcO3kZlw2AoNnZcNVGmQ67^+P{{k; zx~?3Y{stM%5ZiUXU}esgtQs&r?5)sC??#sG@aBk*5j8;h;6y_*>nhW!hWPh0e(y#Y zh<5+Irt()6G-fs{7`a|O1bhDb5>v{OwjMA%>`>F`t5Ol`y`RyL(lIjyKT5aV*-gMq z!|uWQ`~Fl7MhV#%aeKD!Jx z^k5DE^g=@-Qs4CYQtK#Glc8HM(}V%2Gj5{3Fq68ERDnM90f4=q;`nuPtlhwA-3WTk zgAga(RKfrhw6$_kWd#z6;&BWX78q2}`=?zgxWvo?e~pR~e?~5zt-}Db4I6Gh_mxOz*l`XkNEOM z#hH)w{;_4nFBBUGdXm-%T4ts4MGpAES$nj}bn+RVh#*iNUG<0!L78iBZJwRxbh90X zq8q|}ZiHH1ULe~^jqbySg^Yg?tOncx9 z#6(|UsQR8109+8xy|<)%Ul^o4QI>d}qWdfertaAFCl+3RO+5dC4FE|0sxP$@_6j%7 zg!#&%!gYg4w>JzR8#1%NikKH*WdMo;Vx`bBHfUL|jL~bpLz|o@R|N{1F+8r(08LTb za+`qxC!L@?TI&5}F9QiDa6E6(1BT)b3FUg3kg^D15gsu|4)bH25?m(g|HJ$!BdMtkaQLRIM6dzXoLVk5Qtt{#Q-`W@OyQ)2mnM7 z?B@dxdzH5c02)m2&?4OhJpeKw!0laDW#JGOvucA3s{nvlh)6bC<9DN-b~`TrLl=&&j95h z08>zuF`P7*1VZBZsTKf;^ZU;oY^Z6&o>x`AA$Eq;#012Oh_jY$!}vYGW0+|KV4XL4 zK!q2_A>;`|FKTKCSjgh1CiHP=m$>X|&I>;kd>0q|_CpQ;;CY8lt6q}n494`Yng{q` z(Kd0sgbDLBF*8VQ;}50)pg4k7CTAk6L5 zgizwD<|3IK8%MzZyJYG-%FjYPKQU#1QWPE7Ra@az(X8XV$sa`hyU2$~Eo@11Nj$%_)+rRlzAf_DF~pR5qtx9l3|f>e~>HvUglt z=mb~9^B-9NKwOs0Wp|IsRPfA&n-@?c$M!YQR(G5)j;DBNQz%7>Nnt{-vRer!u> zdp@!PK%+W^6&Ao}0p+O70y^vw->Sy`IIR4JU~+(N$CfIjP}?$9eGY1+&>KNpgJ*qH zW(+&`RXl?uF)-YJt=lUz#i=fi^uk@{`Ws8{7YbUD$i0tqYXbn(YM}{E)oP9GbMaNx z9ndHfD=18NLb>X8yZvo68u>y!|NfOc|C9V(Yqn^GHjqbJk@A7vex~M2r1F*hHJwSJ z)C%B>emb@`)95UDC*>c=06=P?(BD2~+6&$B^NGKSsisMHS~!&jCKksC+SvQS=HFDH z7yXSRiF*g^J?2;a&S6T_|19Wp6}0weFj*KvMH|arw;hlNp*K-QD>-(e`;^iX-boMN zBZept75MVEkG-CASyjhW!VV#Cb?#}pG_0&D_c6KZD|j4L{*}<%qhKct_GU5yfWhF* zq6@Wrh|1TElp>RXrxVkMholHGIrL9t(6%H|4a$eT8H{#j<60d&6?)U3mR)87L*=vV z$jSl$q#)g$!8-{8L*;|8%sF7I4}ha>Qv(nzJEZ)jq4K9(gHfKPm(%Uhv9L3n#uddw zeE%fO7PG95ZC+7317w&$rG#6aKu+PgjfI~v75;3fd=r+fh5;Cn4mpN-)7b#q0tR+W zb047mF?V}-mS0>ar=67)?cJuK$_G=p5aw$Tcc^^!klC}`0Q*(FrVS7y8^jG!e2BfC z!<1Q5zI6aFgTQ$pFe~1=@+$!Vqfc-MgnL&^15*99;4PAoU__MKe0HzGM&bs+-bVls ztZnv^0%ilHPXvFR0V@t)tpH%k6O@4gm;r{GWA4VomBB#m=a!UiCC+ya02#`(o%u`f 
z_=Lbvs&$Yl!Lt86!%x40ripOTi)b|fG8FnC>MJjoihl>N0#R?dm=x`O=TOAM2Vc=?Mn}!>6Titk`4)y*{ctr1e~C5iGlU zEt*-qixS)4SG~QRb0#3iEX2|*yv;aZZ4+?tzL(1X#gzy0Tx}@~yJnU`#yS)~cX3UG z=X%Q_5UvpjO)?Gpnnw37FNX1&(wdo?f7~foZULtbqMJ<I06Bozmy0<=K$x&ZjQ0Ek_VlDp3tU?V mfD&+os^>Ok@` .btn, .btn-sm { - padding: .5rem 1rem; -} - -.switch { - position: relative; - display: inline-block; - width: 45px; - height: 25px; -} - -.switch input { - opacity: 0; - width: 0; - height: 0; -} - -.slider { - position: absolute; - cursor: pointer; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-color: #ccc; - -webkit-transition: .4s; - transition: .4s; -} - -.slider:before { - position: absolute; - content: ""; - height: 17px; - width: 18px; - left: 4px; - bottom: 4px; - background-color: white; - -webkit-transition: .4s; - transition: .4s; -} - -input:checked + .slider { - background-color: #2196F3; -} - -input:focus + .slider { - box-shadow: 0 0 1px #2196F3; -} - -input:checked + .slider:before { - -webkit-transform: translateX(20px); - -ms-transform: translateX(20px); - transform: translateX(20px); -} - -/* Rounded sliders */ -.slider.round { - border-radius: 34px; -} - -.slider.round:before { - border-radius: 50%; -} - -.field{ - width: 100%; - display: block; -} - -input[type=text], select { - width: 100%; - padding: 12px 15px; - display: block; - border: .5px solid #ccc; - border-radius: 5px; - box-sizing: border-box; - margin-right: 10px; - font-size: 13px; -} - -.edit-container { - border-radius: 5px; - background-color: #f2f2f2; - padding: 20px; -} - -.field{ - margin-top: 15px; - font-weight: bold; -} - -.button-container{ - margin-top: 20px; - width: 50%; -} diff --git a/tasks/chupiflow_ui/templates/editjob.html b/tasks/chupiflow_ui/templates/editjob.html deleted file mode 100644 index f799ead6..00000000 --- a/tasks/chupiflow_ui/templates/editjob.html +++ /dev/null @@ -1,54 +0,0 @@ -{% extends "layout.html" %} -{% block content %} -
-

Edit

-

Editing job {{cron}} in {{tabfile}}.tab

-
-
-
- -
- -
-
-
- -
- -
-
-
- -
- -
-
-
- -
- -
-
-
- -
- -
-
-
- - {% if error %} - Job is not valid. {{ error }} - {% endif %} -
-
-
-
-{% endblock %} \ No newline at end of file diff --git a/tasks/chupiflow_ui/templates/file_viewer.html b/tasks/chupiflow_ui/templates/file_viewer.html deleted file mode 100644 index 33873ab0..00000000 --- a/tasks/chupiflow_ui/templates/file_viewer.html +++ /dev/null @@ -1,28 +0,0 @@ -{% extends "layout.html" %} -{% block content %} -
-
- {% if file_type == 'task' %} -

Taskfile for: {{ cron }}

- {% elif file_type == 'log'%} -

Logfile for: {{ cron }}

- {% if status=='done'%} - - {% elif status=='running'%} - - {% endif %} - MANUAL RUN: - {{ status|upper }} - {% elif file_type == 'tabfile'%} -

Tabfile view

- {% endif %} -
- -
    - {% for line in file %} -
  1. {{ line|safe }}
  2. - {% endfor %} -
-
-
-{% endblock %} \ No newline at end of file diff --git a/tasks/chupiflow_ui/templates/jobs.html b/tasks/chupiflow_ui/templates/jobs.html deleted file mode 100644 index 5f06c00e..00000000 --- a/tasks/chupiflow_ui/templates/jobs.html +++ /dev/null @@ -1,66 +0,0 @@ -{% extends "layout.html" %} -{% block content %} -
-
- - -
- {% if error %} - Path doesn't exist - {% endif %} -
-
-
- {% for tabfilename, tabdict in tabfiles.items() %} -
-

Tabfile: {{ tabfilename }}

- - - - - - - - - - - - - {% for cron, crondict in tabdict.items() %} - - - - {% if crondict['enabled'] %} - - {% if crondict['valid'] %} - - - - - - - {% endfor %} - - - {% endfor %} - - - - -{% endblock %} - diff --git a/tasks/chupiflow_ui/templates/layout.html b/tasks/chupiflow_ui/templates/layout.html deleted file mode 100644 index 5fd68927..00000000 --- a/tasks/chupiflow_ui/templates/layout.html +++ /dev/null @@ -1,73 +0,0 @@ - - - - Smart Citizen ChupiFlow - - - - - - - - - - - - -
- - -
- {% block content %}{% endblock %} - {% block footer %} - - - {% endblock %} - - - - - - - - \ No newline at end of file diff --git a/tasks/chupiflow_ui/wsgi.py b/tasks/chupiflow_ui/wsgi.py deleted file mode 100644 index 11e7de6d..00000000 --- a/tasks/chupiflow_ui/wsgi.py +++ /dev/null @@ -1,4 +0,0 @@ -from app import app - -if __name__ == "__main__": - app.run() \ No newline at end of file diff --git a/tasks/dprocess.py b/tasks/dprocess.py deleted file mode 100755 index cb648ede..00000000 --- a/tasks/dprocess.py +++ /dev/null @@ -1,54 +0,0 @@ -# internal imports -from scdata._config import config -from scdata import Device -from scdata.utils import std_out -import sys - -# Config settings -config._out_level = 'DEBUG' -config._timestamp = True -config._avoid_negative_conc = True - -def dprocess(device, dryrun = False): - ''' - This function processes a device from SC API assuming there - is postprocessing information in it and that it's valid for doing - so - ''' - std_out(f'[CHUPIFLOW] Processing instance for device {device}') - # Create device from SC API - d = Device(descriptor = {'source': 'api', 'id': f'{device}'}) - if d.validate(): - # Load only unprocessed - if d.load(only_unprocessed=True, options = {'resample': False}, - max_amount=config._max_load_amount): - # Process it - d.process() - # Post results - d.post_metrics(dry_run=dry_run, - max_retries = config._max_forward_retries) - # Forward it if requested - if d.forwarding_request is not None: - std_out(f'[CHUPIFLOW] Forwarding {device}') - d.forward(dry_run=dry_run, - max_retries = config._max_forward_retries) - d.update_postprocessing(dry_run=dry_run) - else: - std_out(f'[CHUPIFLOW] Device {device} not valid', 'ERROR') - std_out(f'[CHUPIFLOW] Concluded job for {device}') - -if __name__ == '__main__': - - if '-h' in sys.argv or '--help' in sys.argv or '-help' in sys.argv: - print('dprocess: Process device of SC API') - print('USAGE:\n\rdprocess.py --device [options]') - print('options:') - print('--dry-run: dry run') - sys.exit() - - if '--dry-run' in sys.argv: dry_run = True - else: dry_run = False - - if '--device' in sys.argv: - device = int(sys.argv[sys.argv.index('--device')+1]) - dprocess(device, dry_run) diff --git a/tasks/dschedule.py b/tasks/dschedule.py deleted file mode 100644 index f227f4be..00000000 --- a/tasks/dschedule.py +++ /dev/null @@ -1,72 +0,0 @@ -from os.path import join -from os import makedirs -import sys - -from scdata._config import config -from scdata.utils import std_out -from scdata.io.device_api import ScApiDevice -from scdata import Device - -config._out_level = 'DEBUG' -config._timestamp = True - -from scheduler import Scheduler - -def dschedule(interval_hours, dry_run = False): - ''' - This function schedules processing SC API devices based - on the result of a global query for data processing - in the SC API - ''' - try: - df = ScApiDevice.search_by_query(key="postprocessing_id", - value="not_null", full= True) - except: - pass - return None - - # Check devices to postprocess first - dl = [] - - for device in df.index: - std_out(f'[CHUPIFLOW] Checking postprocessing for {device}') - scd = Device(descriptor={'source': 'api', 'id': device}) - # Avoid scheduling invalid devices - if scd.validate(): dl.append(device) - else: std_out(f'[CHUPIFLOW] Device {device} not valid', 'ERROR') - - for d in dl: - # Set scheduler - s = Scheduler() - # Define task - task = f'{config._device_processor}.py --device {d}' - #Create log output if not existing - dt = join(config.paths['tasks'], str(d)) - 
makedirs(dt, exist_ok=True) - log = f"{join(dt, f'{config._device_processor}_{d}.log')}" - # Schedule task - s.schedule_task(task = task, - log = log, - interval = f'{interval_hours}H', - dry_run = dry_run, - load_balancing = True) - -if __name__ == '__main__': - - if '-h' in sys.argv or '--help' in sys.argv or '-help' in sys.argv: - print('dschedule: Schedule tasks for devices to process in SC API') - print('USAGE:\n\rdschedule.py [options]') - print('options:') - print('--interval-hours: taks execution interval in hours (default: scdata.config._postprocessing_interval_hours)') - print('--dry-run: dry run') - sys.exit() - - if '--dry-run' in sys.argv: dry_run = True - else: dry_run = False - - if '--interval-hours' in sys.argv: - interval = int(sys.argv[sys.argv.index('--interval-hours')+1]) - else: - interval = config._postprocessing_interval_hours - - dschedule(interval, dry_run) diff --git a/tasks/requirements.txt b/tasks/requirements.txt deleted file mode 100644 index a8dc8a78..00000000 --- a/tasks/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -python-crontab==2.5.1 \ No newline at end of file diff --git a/tasks/scheduler.py b/tasks/scheduler.py deleted file mode 100644 index ad86cc97..00000000 --- a/tasks/scheduler.py +++ /dev/null @@ -1,105 +0,0 @@ -from crontab import CronTab -from os.path import join, realpath, dirname -import sys -import subprocess - -from scdata._config import config -from scdata.utils import std_out -from numpy import zeros, random, where -config._out_level = 'DEBUG' -config._timestamp = True - -class Scheduler(object): - """Wrapper class for CronTab Task Scheduling""" - def __init__(self, tabfile = None): - self.cron = CronTab(user=True) - if tabfile is None: - self.tabfile = join(config.paths['tasks'], f'{config._tabfile}.tab') - else: - self.tabfile = tabfile - - def check_slots(self, frequency = 'hourly'): - # Check frequency - if frequency == 'hourly': - sn = 60 - fn = 8760 - elif frequency == 'daily': - sn = 24 - fn = 365 - # Check for slots - slots = zeros(sn) - for job in self.cron: - if job.frequency() == fn: - for part in job.minutes.parts: - slots[part]+=1 - - # Return a random slot - return random.choice(where(slots == slots.min())[0]) - - def schedule_task(self, task, log, interval, force_first_run = False,\ - overwrite = False, dry_run = False, load_balancing = False): - std_out(f'Setting up {task}...') - - # Find if the task is already there - comment = task.replace('--','').replace(' ', '_').replace('.py','') - - if self.check_existing(comment): - std_out('Task already exists') - if not overwrite: - std_out('Skipping') - return - else: - std_out('Removing') - self.remove(comment) - - # Check if dry_run - if dry_run: _dry_run = '--dry-run' - else: _dry_run = '' - - # Make command - instruction = f'{dirname(realpath(__file__))}/{task} {_dry_run}' - command = f"{sys.executable} {instruction} >> {log} 2>&1" - print (command) - - # Set cronjob - job = self.cron.new(command=command, comment=comment) - - # Workaround for parsing interval - if interval.endswith('D'): - job.every(int(interval[:-1])).days() - # If load balancing, add in low slot - if load_balancing: job.hour.on(self.check_slots('daily')) - elif interval.endswith('H'): - job.every(int(interval[:-1])).hours() - # If load balancing, add in low slot - if load_balancing: job.minute.on(self.check_slots('hourly')) - elif interval.endswith('M'): - job.every(int(interval[:-1])).minutes() - # No load balance for minutes - self.cron.write(self.tabfile) - - # Workaround for macos? 
-        subprocess.call(['crontab', self.tabfile])
-
-        if force_first_run:
-            std_out('Running task for first time. This could take a while')
-            job.run()
-
-        std_out('Done', 'SUCCESS')
-
-    def remove(self, comment):
-        l = []
-        c = self.cron.find_comment(comment)
-        for item in c: self.cron.remove(item)
-        self.cron.write(self.tabfile)
-
-    def check_existing(self, comment):
-        l = []
-        c = self.cron.find_comment(comment)
-        for item in c: l.append(c)
-        if l:
-            std_out(f'{comment} already running')
-            return True
-        else:
-            std_out(f'{comment} not running')
-            return False

From 42fe99dd0b1987592b276788fd97cc54c9c15ecf Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 18:57:43 +0200
Subject: [PATCH 20/72] Move to logger

---
 scdata/_config/__init__.py                  |   2 +-
 scdata/_config/config.py                    |   9 +-
 scdata/device/process/alphasense.py         |  44 +++---
 scdata/device/process/baseline.py           |  76 +++++-----
 scdata/device/process/regression.py         |  10 +-
 scdata/device/process/timeseries.py         |  48 +++----
 scdata/io/model.py                          |  48 +++----
 scdata/test/plot/box_plot.py                |  13 +-
 scdata/test/plot/heatmap_iplot.py           |  16 +--
 scdata/test/plot/heatmap_plot.py            |  13 +-
 scdata/test/plot/maps.py                    |  20 +--
 scdata/test/plot/plot_tools.py              |  45 +++---
 scdata/test/plot/scatter_dispersion_grid.py |  62 ++++----
 scdata/test/plot/scatter_iplot.py           |  25 ++--
 scdata/test/plot/scatter_plot.py            |  21 +--
 scdata/test/plot/ts_dendrogram.py           |  44 +++---
 scdata/test/plot/ts_dispersion_grid.py      |  50 +++----
 scdata/test/plot/ts_dispersion_plot.py      |  56 ++++----
 scdata/test/plot/ts_dispersion_uplot.py     |  28 ++--
 scdata/test/plot/ts_iplot.py                |  12 +-
 scdata/test/plot/ts_plot.py                 |  17 +--
 scdata/test/plot/ts_scatter.py              |  19 +--
 scdata/test/plot/ts_uplot.py                |  12 +-
 scdata/utils/__init__.py                    |   2 +-
 scdata/utils/lazy.py                        |   4 +-
 scdata/utils/location.py                    |   6 +-
 scdata/utils/out.py                         |  57 ++++----
 scdata/utils/units.py                       |  25 ++--
 scdata/utils/zenodo.py                      | 150 ++++++++----------
 29 files changed, 469 insertions(+), 465 deletions(-)

diff --git a/scdata/_config/__init__.py b/scdata/_config/__init__.py
index dfc976ab..073b33b0 100644
--- a/scdata/_config/__init__.py
+++ b/scdata/_config/__init__.py
@@ -1,3 +1,3 @@
 from .config import Config

-config = Config()
+config = Config()
\ No newline at end of file
diff --git a/scdata/_config/config.py b/scdata/_config/config.py
index 3f67bf93..1bc53995 100644
--- a/scdata/_config/config.py
+++ b/scdata/_config/config.py
@@ -13,13 +13,10 @@
 from math import inf
 from numpy import array

-class Config(object):
+import logging

-    # Output level
-    # 'QUIET': nothing
-    # 'NORMAL': warn, err,
-    # 'DEBUG': info, warn, err
-    _out_level = 'NORMAL'
+class Config(object):
+    log_level = logging.INFO

     # Timestamp for log output
     _timestamp = True
diff --git a/scdata/device/process/alphasense.py b/scdata/device/process/alphasense.py
index ce5c0dd1..2f0ec7f5 100644
--- a/scdata/device/process/alphasense.py
+++ b/scdata/device/process/alphasense.py
@@ -1,4 +1,4 @@
-from scdata.utils import std_out, get_units_convf, find_dates, localise_date
+from scdata.utils import logger, get_units_convf, find_dates, localise_date
 from scdata._config import config
 from scdata.device.process.params import *
 from scdata.device.process import baseline_calc, clean_ts
@@ -55,16 +55,16 @@ def comp_t(x, comp_lut):
     if 't' not in kwargs: flag_error = True

     if flag_error:
-        std_out('Problem with input data', 'ERROR')
+        logger.error('Problem with input data')
         return None

     if kwargs['alphasense_id'] is None:
-        std_out(f"Empty ID. Ignoring", 'WARNING')
+        logger.warning(f"Empty ID. 
Ignoring") return None # Get Sensor data if kwargs['alphasense_id'] not in config.calibrations: - std_out(f"Sensor {kwargs['alphasense_id']} not in calibration data", 'ERROR') + logger.warning(f"Sensor {kwargs['alphasense_id']} not in calibration data") return None # Make copy @@ -89,8 +89,8 @@ def comp_t(x, comp_lut): try: cal_data[item] = float (cal_data[item]) except: - std_out(f"Alphasense calibration data for {kwargs['alphasense_id']} is not correct", 'ERROR') - std_out(f'Error on {item}: \'{cal_data[item]}\'', 'ERROR') + logger.error(f"Alphasense calibration data for {kwargs['alphasense_id']} is not correct") + logger.error(f'Error on {item}: \'{cal_data[item]}\'') return # Remove spurious voltages (0V < electrode < 5V) @@ -150,7 +150,7 @@ def ec_sensor_temp(dataframe, **kwargs): if kwargs['priority'] in dataframe.columns: return dataframe[kwargs['priority']] for option in config._as_temp_channel: if option in dataframe.columns: return dataframe[option] - std_out('Problem with input data', 'ERROR') + logger.error('Problem with input data') return None def alphasense_pt1000(dataframe, **kwargs): @@ -181,16 +181,16 @@ def alphasense_pt1000(dataframe, **kwargs): if 'pt1000minus' not in kwargs: flag_error = True if flag_error: - std_out('Problem with input data', 'ERROR') + logger.error('Problem with input data') return None if kwargs['afe_id'] is None: - std_out(f"Empty ID. Ignoring", 'WARNING') + logger.warning(f"Empty ID. Ignoring") return None # Get Sensor data if kwargs['afe_id'] not in config.calibrations: - std_out(f"AFE {kwargs['afe_id']} not in calibration data", 'ERROR') + logger.error(f"AFE {kwargs['afe_id']} not in calibration data") return None # Retrieve calibration data - verify its all float @@ -199,8 +199,8 @@ def alphasense_pt1000(dataframe, **kwargs): try: cal_data[item] = float (cal_data[item]) except: - std_out(f"Alphasense calibration data for {kwargs['afe_id']} is not correct", 'ERROR') - std_out(f'Error on {item}: \'{cal_data[item]}\'', 'ERROR') + logger.error(f"Alphasense calibration data for {kwargs['afe_id']} is not correct") + logger.error(f'Error on {item}: \'{cal_data[item]}\'') return # Make copy @@ -233,11 +233,11 @@ def channel_names(dataframe, **kwargs): flag_error = False if 'channel' not in kwargs: flag_error = True if kwargs['channel'] not in dataframe: - std_out(f"Channel {kwargs['channel']} not in dataframe. Ignoring", 'WARNING') + logger.warning(f"Channel {kwargs['channel']} not in dataframe. 
Ignoring") return None if flag_error: - std_out('Problem with input data', 'ERROR') + logger.error('Problem with input data') return None # Make copy @@ -272,12 +272,12 @@ def basic_4electrode_alg(dataframe, **kwargs): if 'pollutant' not in kwargs: flag_error = True if flag_error: - std_out('Problem with input data', 'ERROR') + logger.error('Problem with input data') return None # Get Sensor data if kwargs['id'] not in config.calibrations: - std_out(f"Sensor {kwargs['id']} not in calibration data", 'ERROR') + logger.error(f"Sensor {kwargs['id']} not in calibration data") return None we_sensitivity_na_ppb = config.calibrations[kwargs['id']]['we_sensitivity_na_ppb'] @@ -286,14 +286,14 @@ def basic_4electrode_alg(dataframe, **kwargs): nWA = config.calibrations[kwargs['id']]['we_sensor_zero_mv']/config.calibrations[kwargs['id']]['ae_sensor_zero_mv'] if sensor_type != kwargs['pollutant']: - std_out(f"Sensor {kwargs['id']} doesn't coincide with calibration data", 'ERROR') + logger.error(f"Sensor {kwargs['id']} doesn't coincide with calibration data") return None # This is always in ppm since the calibration data is in signal/ppm if kwargs['hardware'] == 'alphadelta': current_factor = alphadelta_pcb elif kwargs['hardware'] == 'isb': current_factor = 1 #TODO make it so we talk in mV else: - std_out(f"Measurement hardware {kwargs['hardware']} not supported", 'ERROR') + logger.error(f"Measurement hardware {kwargs['hardware']} not supported") return None result = current_factor*(dataframe[kwargs['working']] - nWA*dataframe[kwargs['auxiliary']])/abs(we_sensitivity_na_ppb) @@ -380,7 +380,7 @@ def baseline_4electrode_alg(dataframe, **kwargs): else: deltas = baseline_deltas if flag_error: - std_out('Problem with input data', 'ERROR') + logger.error('Problem with input data') return None min_date, max_date, _ = find_dates(dataframe) @@ -401,7 +401,7 @@ def baseline_4electrode_alg(dataframe, **kwargs): nWA = config.calibrations.loc[kwargs['id'],'w_zero_current']/config.calibrations.loc[kwargs['id'],'aux_zero_current'] if target_1 != kwargs['pollutant']: - std_out(f"Sensor {kwargs['id']} doesn't coincide with calibration data", 'ERROR') + logger.error(f"Sensor {kwargs['id']} doesn't coincide with calibration data") return None result = pcb_factor*(dataframe[kwargs['target']] - baseline)/abs(sensitivity_1) @@ -451,7 +451,7 @@ def deconvolution(dataframe, **kwargs): if 'pollutant' not in kwargs: flag_error = True if flag_error: - std_out('Problem with input data', 'ERROR') + logger.error('Problem with input data') return None sensitivity_1 = config.calibrations.loc[kwargs['id'],'sensitivity_1'] @@ -461,7 +461,7 @@ def deconvolution(dataframe, **kwargs): nWA = config.calibrations.loc[kwargs['id'],'w_zero_current']/config.calibrations.loc[kwargs['id'],'aux_zero_current'] if target_1 != kwargs['pollutant']: - std_out(f"Sensor {kwargs['id']} doesn't coincide with calibration data", 'ERROR') + logger.error(f"Sensor {kwargs['id']} doesn't coincide with calibration data") return None factor_unit_1 = get_units_convf(kwargs['pollutant'], from_units = 'ppm') diff --git a/scdata/device/process/baseline.py b/scdata/device/process/baseline.py index 2f3b0745..4e5d1e32 100644 --- a/scdata/device/process/baseline.py +++ b/scdata/device/process/baseline.py @@ -7,7 +7,7 @@ from numpy import max as npmax from numpy import abs as npabs from numpy import argmax, argmin, arange, exp -from scdata.utils import std_out +from scdata.utils import logger from scdata._config import config from math import isnan from .formulae import 
exp_f
@@ -26,7 +26,7 @@ def find_min_max(min_max, iterable = list()):
     Returns
     -------
         Value and index of maximum in the list
-    """ 
+    """

     if min_max == 'max':
         value = npmax(iterable)
@@ -34,7 +34,7 @@
     elif min_max == 'min':
         value = npmin(iterable)
         index = argmin(iterable)
-    else: 
+    else:
         value, index = None, None

     return value, index
@@ -65,8 +65,8 @@ def get_delta_baseline(series, **kwargs):
     if 'btype' in kwargs: btype = kwargs['btype']
     else: btype = 'min'

-    if delta == 0: std_out(f'Not valid delta = {delta}', 'ERROR'); return None
-
+    if delta == 0: logger.error(f'Not valid delta = {delta}'); return None
+
     result = series.copy()
     # result = result.resample(resample).mean()

@@ -74,12 +74,12 @@
     for pos in range(0, len(pdates)-1):
         chunk = series[pdates[pos]:pdates[pos+1]]
-
+
         if len(chunk.values) == 0: result[pdates[pos]:pdates[pos+1]] = 0
-        else: 
+        else:
             if btype == 'min': result[pdates[pos]:pdates[pos+1]] = min(chunk.values)
             elif btype == 'max': result[pdates[pos]:pdates[pos+1]] = max(chunk.values)
-
+
     return result

 def get_als_baseline(series, lambd = 1e5, p = 0.01, n_iter=10):
@@ -93,7 +93,7 @@
         Z = W + lambd * D.dot(D.transpose())
         z = spsolve(Z, w*series)
         w = p * (series > z) + (1-p) * (series < z)
-
+
     return z

 # TODO DOCUMENT
@@ -102,10 +102,10 @@ def baseline_calc(dataframe, **kwargs):
     '''
         reg_type
         baseline_type
-        if als 
+        if als
            lambdas
            p
-        if deltas 
+        if deltas
            esample: int (optional)
                '1Min'
                Frequency at which the delta is based on, and therefore to resample to
@@ -119,27 +119,27 @@
     else: baseline_type = kwargs['baseline_type']

     pearsons =[]
-    target_name = dataframe.iloc[:,0].name; std_out ('Target: ', target_name)
-    baseline_name = dataframe.iloc[:,1].name; std_out ('Baseline: ', baseline_name)
+    target_name = dataframe.iloc[:,0].name; logger.info(f'Target: {target_name}')
+    baseline_name = dataframe.iloc[:,1].name; logger.info(f'Baseline: {baseline_name}')

     result = dataframe.copy()
     result.dropna(axis = 0, inplace=True)

     if result.empty: return None

-    if config._intermediate_plots and config._plot_out_level == 'DEBUG': 
+    if config._intermediate_plots and config._plot_out_level == 'DEBUG':
         fig, ax = plt.subplots(figsize=(12,8))

     if baseline_type == 'deltas':
         if 'deltas' not in kwargs: n_deltas = config._baseline_deltas
         else: n_deltas = kwargs['deltas']
-
+
         if 'resample' not in kwargs: resample = '1Min'
         else: resample = kwargs['resample']

         result = result.resample(resample).mean()
-
+
         l_iter = n_deltas

         for delta in n_deltas:
@@ -155,7 +155,7 @@
             target_resampled = result.loc[:,name_delta].resample(f'{delta*off_base}{off_alias}').mean().values
             baseline_resampled = result.loc[:,baseline_name].resample(f'{delta*off_base}{off_alias}').mean().values

-            if config._intermediate_plots and config._plot_out_level == 'DEBUG': 
+            if config._intermediate_plots and config._plot_out_level == 'DEBUG':
                 ax.plot(result.index, result[name_delta], label = name_delta)

             _, _, r_value, _, _ = linregress(transpose(target_resampled), transpose(baseline_resampled))
@@ -176,7 +176,7 @@
             name_lambda = name +'_' +str(lambd)
             result[name_lambda] = get_als_baseline(result.loc[:,target_name], lambd, p)

-            if config._intermediate_plots and config._plot_out_level == 'DEBUG': 
+            if config._intermediate_plots and config._plot_out_level == 'DEBUG':
                 ax.plot(result.index, 
result[name_lambda], label = name_lambda) _, _, r_value, _, _ = linregress(transpose(result[name_lambda]), transpose(result.loc[:,baseline_name].values)) @@ -186,40 +186,40 @@ def baseline_calc(dataframe, **kwargs): plt.show() ax.plot(result.index, result.loc[:,target_name], label = target_name) ax.plot(result.index, result.loc[:,baseline_name], label = baseline_name) - + ax.axis('tight') ax.legend(loc='best') ax.set_xlabel('Date') ax.set_ylabel('Baselines') ax.grid(True) - + plt.show() ## Find Max in the pearsons - correlation can be negative, so use absolute of the pearson _, ind_max = find_min_max('max', npabs(pearsons)) - # std_out(f'Max index in pearsons: {ind_max}') + # logger.info(f'Max index in pearsons: {ind_max}') result.dropna(axis = 0, inplace=True) if reg_type == 'linear': - + ## Fit with y = A + Bx slope, intercept, r_value, p_value, std_err = linregress(transpose(result.loc[:,baseline_name].values), result[(target_name + f'_{l_iter[ind_max]}')]) baseline = intercept + slope*result.loc[:,baseline_name].values # print (r_value) - + elif reg_type == 'exponential': - + ## Fit with y = Ae^(Bx) -> logy = logA + Bx logy = log(result[(target_name + f'_{l_iter[ind_max]}')]) slope, intercept, r_value, p_value, std_err = linregress(transpose(result.loc[:,baseline_name].values), logy) baseline = exp_f(transpose(result.loc[:,baseline_name].values), exp(intercept), slope, 0) # print (r_value) - + elif reg_type == 'best': - + ## Find linear r_value slope_lin, intercept_lin, r_value_lin, p_value_lin, std_err_lin = linregress(transpose(result.loc[:, baseline_name].values), result[(target_name + f'_{l_iter[ind_max]}')]) - + ## Find Exponential r_value logy = log(result[(target_name + f'_{l_iter[ind_max]}')]) slope_exp, intercept_exp, r_value_exp, p_value_exp, std_err_exp = linregress(transpose(result.loc[:, baseline_name].values), logy) @@ -231,32 +231,32 @@ def baseline_calc(dataframe, **kwargs): baseline = intercept_lin + slope_lin*result.loc[:,baseline_name].values else: baseline = exp_f(transpose(result.loc[:,baseline_name].values), exp(intercept_exp), slope_exp, 0) - + elif not isnan(r_value_lin): - + baseline = intercept_lin + slope_lin*result.loc[:,baseline_name].values - + elif not isnan(r_value_exp): - + baseline = exp_f(transpose(result.loc[:,baseline_name].values), exp(intercept_exp), slope_exp, 0) else: - std_out('Exponential and linear regression are nan', 'ERROR') - + logger.error('Exponential and linear regression are nan') + # Avoid baselines higher than the target result[target_name + '_baseline_raw'] = baseline result[target_name + '_baseline'] = result[[(target_name + '_' + 'baseline_raw'), target_name]].min(axis=1) - + if config._intermediate_plots and config._plot_out_level == 'DEBUG': with plt.style.context('seaborn-white'): fig1, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12,8)) - + ax1.plot(result.loc[:, baseline_name].values, result[(target_name + f'_{l_iter[ind_max]}')], label = 'Baseline ' + str(l_iter[ind_max]), linewidth=0, marker='o') ax1.plot(result.loc[:, baseline_name].values, result[(target_name + '_baseline')] , label = 'Regressed value', linewidth=0, marker='o') legend = ax1.legend(loc='best') ax1.set_xlabel(baseline_name) ax1.set_ylabel('Regression values') ax1.grid(True) - + lns1 = ax2.plot(result.index, result.loc[:, target_name], label = "Target", linestyle=':', linewidth=1, marker=None) #[ax2.plot(result.index, result[(name +'_' +str(delta))].values, label="Delta {}".format(delta), marker=None, linestyle='-', linewidth=1) for delta in 
_numberDeltas] lns2 = ax2.plot(result.index, result[target_name + '_' + 'baseline'], label='Baseline', marker = None) @@ -264,7 +264,7 @@ def baseline_calc(dataframe, **kwargs): ax2.axis('tight') ax2.set_title("Baseline Extraction") ax2.grid(True) - + ax22 = ax2.twinx() lns22 = ax22.plot(result.index, result.loc[:, baseline_name].values, color = 'red', label = baseline_name, linestyle='-', linewidth=1, marker=None) ax22.set_ylabel(result.loc[:, baseline_name].name, color = 'red') @@ -274,7 +274,7 @@ def baseline_calc(dataframe, **kwargs): lns = lns1+lns2+lns22 labs = [l.get_label() for l in lns] ax2.legend(lns, labs, loc='best') - + fig2, ax3 = plt.subplots(figsize=(12,8)) # two axes on figure ax3.plot(l_iter, pearsons) diff --git a/scdata/device/process/regression.py b/scdata/device/process/regression.py index 80764648..1eb27c7c 100644 --- a/scdata/device/process/regression.py +++ b/scdata/device/process/regression.py @@ -1,5 +1,5 @@ from scdata._config import config -from scdata.utils import std_out, dict_fmerge, clean +from scdata.utils import logger, dict_fmerge, clean from pandas import DataFrame from numpy import array @@ -34,12 +34,12 @@ def apply_regressor(dataframe, **kwargs): inputdf = dataframe[inputs].copy() inputdf = inputdf.reindex(sorted(inputdf.columns), axis=1) except KeyError: - std_out('Inputs not in dataframe', 'ERROR') + logger.error('Inputs not in dataframe') pass return None if 'model' not in kwargs: - std_out('Model not in inputs', 'ERROR') + logger.error('Model not in inputs') else: model = kwargs['model'] @@ -47,9 +47,9 @@ def apply_regressor(dataframe, **kwargs): options = config._model_def_opt else: options = dict_fmerge(config._model_def_opt, kwargs['options']) - + # Remove na - inputdf = clean(inputdf, options['clean_na'], how = 'any') + inputdf = clean(inputdf, options['clean_na'], how = 'any') features = array(inputdf) result = DataFrame(model.predict(features)).set_index(inputdf.index) diff --git a/scdata/device/process/timeseries.py b/scdata/device/process/timeseries.py index a9bd2612..38a54ba3 100644 --- a/scdata/device/process/timeseries.py +++ b/scdata/device/process/timeseries.py @@ -1,7 +1,7 @@ from numpy import nan, full, power, ones, diff, convolve, append from scipy import ndimage from scdata.device.process import is_within_circle -from scdata.utils import std_out +from scdata.utils import logger def delta_index_ts(dataframe, **kwargs): result = dataframe.index.to_series().diff().astype('timedelta64[s]') @@ -25,7 +25,7 @@ def poly_ts(dataframe, **kwargs): ------- result = sum(coefficients[i]*channels[i]^exponents[i] + extra_term) """ - + if 'channels' not in kwargs: return None else: channels = kwargs['channels'] n_channels = len(channels) @@ -34,10 +34,10 @@ def poly_ts(dataframe, **kwargs): if 'coefficients' not in kwargs: coefficients = ones(n_channels) else: coefficients = kwargs['coefficients'] - + if 'exponents' not in kwargs: exponents = ones(n_channels) else: exponents = kwargs['exponents'] - + if 'extra_term' not in kwargs: extra_term = 0 else: extra_term = kwargs['extra_term'] @@ -54,10 +54,10 @@ def clean_ts(dataframe, **kwargs): ---------- name: string column to clean to apply. - limits: list, optional + limits: list, optional (0, 99999) Sensor limits. 
The function will fill with NaN in the values that exceed the band - window_size: int, optional + window_size: int, optional 3 If not None, will smooth the time series by applying a rolling window of that size window_type: str, optional @@ -82,7 +82,7 @@ def clean_ts(dataframe, **kwargs): result[result < lower_limit] = nan # Smoothing - if 'window_size' in kwargs: window = kwargs['window_size'] + if 'window_size' in kwargs: window = kwargs['window_size'] else: window = 3 if 'window_type' in kwargs: win_type = kwargs['window_type'] @@ -95,7 +95,7 @@ def clean_ts(dataframe, **kwargs): def merge_ts(dataframe, **kwargs): """ - Merges readings from sensors into one clean ts. The function checks the dispersion and + Merges readings from sensors into one clean ts. The function checks the dispersion and picks the desired one (min, max, min_nonzero, avg) Parameters ---------- @@ -104,19 +104,19 @@ def merge_ts(dataframe, **kwargs): pick: string 'min' One of the following 'min', 'max', 'avg', 'min_nonzero' - Which one two pick in case of high deviation between the metrics. Picks the avg + Which one two pick in case of high deviation between the metrics. Picks the avg otherwise factor: float (factor > 0) 0.3 Maximum allowed deviation of the difference with respect to the each of signals. It creates a window of [factor*signal_X, -factor*signal_X] for X being each signal - out of which there will be a flag where one of the signals will be picked. This + out of which there will be a flag where one of the signals will be picked. This factor should be set to a value that is similar to the sensor typical deviation Same parameters as clean_ts apply below: - limits: list, optional + limits: list, optional (0, 99999) Sensor limits. The function will fill with NaN in the values that exceed the band - window_size: int, optional + window_size: int, optional 3 If not None, will smooth the time series by applying a rolling window of that size window_type: str, optional @@ -139,15 +139,15 @@ def merge_ts(dataframe, **kwargs): else: factor = kwargs['factor'] # Clean them - for name in kwargs['names']: - subkwargs = {'name': name, - 'limits': kwargs['limits'], - 'window_size': kwargs['window_size'], + for name in kwargs['names']: + subkwargs = {'name': name, + 'limits': kwargs['limits'], + 'window_size': kwargs['window_size'], 'window_type': kwargs['window_type'] } df[name + '_CLEAN'] = clean_ts(df, **subkwargs) - + df['flag'] = full((df.shape[0], 1), False, dtype=bool) df['diff'] = df[kwargs['names'][0] + '_CLEAN'] - df[kwargs['names'][1] + '_CLEAN'] @@ -159,9 +159,9 @@ def merge_ts(dataframe, **kwargs): lnames.append(name + '_CLEAN') df['result'] = df.loc[:, lnames].mean(skipna=True, axis = 1) - + # Pick - if pick == 'min': + if pick == 'min': df.loc[df['flag'] == True, 'result'] = df.loc[df['flag'] == True, lnames].min(skipna=True, axis = 1) elif pick == 'max': df.loc[df['flag'] == True, 'result'] = df.loc[df['flag'] == True, lnames].max(skipna=True, axis = 1) @@ -177,7 +177,7 @@ def rolling_avg(dataframe, **kwargs): ---------- name: string column to clean to apply. 
-    window_size: int, optional 
+    window_size: int, optional
         3
         If not None, will smooth the time series by applying a rolling window of that size
     window_type: str, optional
@@ -192,22 +192,22 @@ def rolling_avg(dataframe, **kwargs):
     Returns
     -------
         pandas series containing the rolling average
-    """ 
+    """

     if 'name' not in kwargs:
-        std_out (f'{kwargs[name]} not in kwargs', 'ERROR')
+        logger.error('name not in kwargs')
         return None

     result = dataframe[kwargs['name']].copy()

     # Smoothing
-    if 'window_size' in kwargs: window = kwargs['window_size'] 
+    if 'window_size' in kwargs: window = kwargs['window_size']
     else: window = 3

     if 'window_type' in kwargs: win_type = kwargs['window_type']
     else: win_type = None

-    if 'type' in kwargs: 
+    if 'type' in kwargs:
         if kwargs['type'] == 'mean': return result.rolling(window = window, win_type = win_type).mean()
         if kwargs['type'] == 'max': return result.rolling(window = window, win_type = win_type).max()
         if kwargs['type'] == 'min': return result.rolling(window = window, win_type = win_type).min()
diff --git a/scdata/io/model.py b/scdata/io/model.py
index 22a6d7b4..923440f5 100644
--- a/scdata/io/model.py
+++ b/scdata/io/model.py
@@ -1,76 +1,76 @@
-from scdata.utils import std_out
+from scdata.utils import logger
 from joblib import dump, load
 from scdata._config import config
 from os.path import join, exists
 from os import makedirs

 def model_export(name = None, path = None, model = None, variables = None, hyperparameters = None, options = None, metrics = None):
-    
+
     if name is None:
-        std_out('No name specified', 'ERROR')
+        logger.error('No name specified')
         return False

-    if path is None: 
+    if path is None:
         path = config.paths['models']
-    
+
     modeldir = join(path, name)

-    if not exists(modeldir): 
+    if not exists(modeldir):
         makedirs(modeldir)

     filename = join(modeldir, name)

     if hyperparameters is not None:
-        std_out('Saving hyperparameters')
+        logger.info('Saving hyperparameters')
         dump(hyperparameters, filename + '_hyperparameters.sav')
-    
+
     if variables is not None:
-        std_out('Saving variables')
+        logger.info('Saving variables')
         dump(variables, filename + '_variables.sav')
     else: return False

     if model is not None:
-        std_out('Saving model')
+        logger.info('Saving model')
         dump(model, filename + '_model.sav', compress = 3)
     else: return False

     if options is not None:
-        std_out('Saving options')
+        logger.info('Saving options')
         dump(options, filename + '_options.sav')
     else: return False

     if metrics is not None:
-        std_out('Saving metrics')
+        logger.info('Saving metrics')
         dump(metrics, filename + '_metrics.sav')
-    else: return False    
+    else: return False

-    std_out(f'Model: {name} saved in {modeldir}', 'SUCCESS')
+    logger.info(f'Model: {name} saved in {modeldir}')

     return True

 def model_load(name = '', path = None):

-    if path is None: 
+    if path is None:
         path = config.paths['models']
-    
+
     modeldir = join(path, name)
     filename = join(modeldir, name)

-    std_out('Loading hyperparameters')
+    logger.info('Loading hyperparameters')
     hyperparameters = load(filename + '_hyperparameters.sav')
-    
-    std_out('Loading variables')
+
+    logger.info('Loading variables')
     variables = load(filename + '_variables.sav')

-    std_out('Loading model')
+    logger.info('Loading model')
     model = load(filename + '_model.sav')

-    std_out('Loading options')
-    options = load(filename + '_options.sav')    
+    logger.info('Loading options')
+    options = load(filename + '_options.sav')

-    std_out('Loading metrics')
+    logger.info('Loading metrics')
     metrics = load(filename + '_metrics.sav')

-    std_out(f'Model: {name} loaded', 
'SUCCESS') + logger.info(f'Model: {name} loaded') return hyperparameters, variables, model, options, metrics \ No newline at end of file diff --git a/scdata/test/plot/box_plot.py b/scdata/test/plot/box_plot.py index 8a5abed6..656cd1d6 100644 --- a/scdata/test/plot/box_plot.py +++ b/scdata/test/plot/box_plot.py @@ -3,7 +3,7 @@ from matplotlib import style from seaborn import set_palette, boxplot # import seaborn as sns -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data, groupby_session @@ -31,23 +31,24 @@ def box_plot(self, **kwargs): Matplotlib figure """ - if config.framework == 'jupyterlab': plt.ioff(); - plt.clf(); + if config.framework == 'jupyterlab': + plt.ioff() + plt.clf() if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._boxplot_def_fmt['mpl'] else: formatting = dict_fmerge(config._boxplot_def_fmt['mpl'], kwargs['formatting']) diff --git a/scdata/test/plot/heatmap_iplot.py b/scdata/test/plot/heatmap_iplot.py index 427bf439..90ca6895 100644 --- a/scdata/test/plot/heatmap_iplot.py +++ b/scdata/test/plot/heatmap_iplot.py @@ -1,5 +1,5 @@ from plotly.graph_objs import Heatmap, Layout, Figure -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data, groupby_session from plotly.offline import iplot @@ -13,8 +13,8 @@ def heatmap_iplot(self, **kwargs): Data for the plot, with the format: "traces": {"1": {"devices": '8019043', "channel" : "PM_10"} - } - options: dict + } + options: dict Options including data processing prior to plot. Defaults in config._plot_def_opt formatting: dict Name of auxiliary electrode found in dataframe. 
Defaults in config._heatmap_def_fmt @@ -24,20 +24,20 @@ def heatmap_iplot(self, **kwargs): """ if config.framework == 'jupyterlab': renderers.default = config.framework - if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + if 'traces' not in kwargs: + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._heatmap_def_fmt['plotly'] else: formatting = dict_fmerge(config._heatmap_def_fmt['plotly'], kwargs['formatting']) @@ -50,7 +50,7 @@ def heatmap_iplot(self, **kwargs): df, subplots = prepare_data(self, traces, options) n_subplots = len(subplots) - gskwags = {'frequency_hours': formatting['frequency_hours']} + gskwags = {'frequency_hours': formatting['frequency_hours']} dfgb, labels, yaxis, channel = groupby_session(df, **gskwags) xticks = [i.strftime("%Y-%m-%d") for i in dfgb.resample(formatting['session']).mean().index] diff --git a/scdata/test/plot/heatmap_plot.py b/scdata/test/plot/heatmap_plot.py index 3339346b..58f3ad3c 100644 --- a/scdata/test/plot/heatmap_plot.py +++ b/scdata/test/plot/heatmap_plot.py @@ -2,7 +2,7 @@ from matplotlib import rcParams from matplotlib import style from seaborn import set_palette, heatmap -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data, groupby_session @@ -25,23 +25,24 @@ def heatmap_plot(self, **kwargs): Matplotlib figure """ - if config.framework == 'jupyterlab': plt.ioff(); - plt.clf(); + if config.framework == 'jupyterlab': + plt.ioff() + plt.clf() if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._heatmap_def_fmt['mpl'] else: formatting = dict_fmerge(config._heatmap_def_fmt['mpl'], kwargs['formatting']) diff --git a/scdata/test/plot/maps.py b/scdata/test/plot/maps.py index 73852016..f14010ca 100644 --- a/scdata/test/plot/maps.py +++ b/scdata/test/plot/maps.py @@ -7,7 +7,7 @@ from math import isnan, floor, ceil from traceback import print_exc from pandas import cut, date_range -from scdata.utils import dict_fmerge, clean, std_out +from scdata.utils import dict_fmerge, clean, logger from scdata._config import config from numpy import linspace, nan from branca import element @@ -245,7 +245,7 @@ def device_metric_map(self, channel, start_date, end_date, options = dict()): _lat = self.devices[str(device)].api_device.lat _long = self.devices[str(device)].api_device.long except AttributeError: - std_out(f'Cannot retrieve [lat, long] from device {device}', 'WARNING') + logger.warning(f'Cannot retrieve [lat, long] from device {device}') pass continue @@ -373,24 +373,24 @@ def path_plot(self, channel = None, map_type = 'dynamic', devices = 'all', mdev = list() for device in devices: if device in self.devices: mdev.append(device) - else: std_out(f'Device 
{device} not found, ignoring', 'WARNING') + else: logger.warning(f'Device {device} not found, ignoring') if len(mdev) == 0: - std_out('Requested devices not in test', 'ERROR') + logger.error('Requested devices not in test') return None for device in mdev: chs = ['GPS_LAT','GPS_LONG'] if channel is not None: if channel not in self.devices[str(device)].readings.columns: - std_out(f'Channel {channel} not in columns: {self.devices[str(device)].readings.columns}', 'ERROR') + logger.error(f'Channel {channel} not in columns: {self.devices[str(device)].readings.columns}') return None # Get bins minmax = False if not options['minmax']: if all([key not in channel for key in config._channel_bins]): - std_out(f'Requested channel {channel} not in config mapped bins {config._channel_bins.keys()}.Using min/max mapping', 'WARNING') + logger.warning(f'Requested channel {channel} not in config mapped bins {config._channel_bins.keys()}.Using min/max mapping') minmax = True else: minmax = True @@ -437,8 +437,8 @@ def path_plot(self, channel = None, map_type = 'dynamic', devices = 'all', color = str(dfc.loc[date, 'COLOR']) if color == 'nan' or isnan(dfc.loc[date, 'GPS_LONG'])\ or isnan(dfc.loc[date, 'GPS_LAT']): - std_out(f'Skipping point {date}', 'WARNING'); continue - + logger.warning(f'Skipping point {date}') + continue geometry = { 'type': 'LineString', 'coordinates': [[dfc.loc[date, 'GPS_LONG'], @@ -543,7 +543,7 @@ def path_plot(self, channel = None, map_type = 'dynamic', devices = 'all', box-shadow: 2px; """, max_width=800, - ); + ) GeoJson(featurecol, tooltip=tooltip, @@ -567,7 +567,7 @@ def path_plot(self, channel = None, map_type = 'dynamic', devices = 'all', ).add_to(m) else: - std_out(f'Not supported map type {map_type}', 'ERROR') + logger.error(f'Not supported map type {map_type}') return None if options['minimap']: diff --git a/scdata/test/plot/plot_tools.py b/scdata/test/plot/plot_tools.py index 04b824f5..a5bf33b5 100644 --- a/scdata/test/plot/plot_tools.py +++ b/scdata/test/plot/plot_tools.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out +from scdata.utils import logger from numpy import arange from pandas import cut, DataFrame, to_datetime, option_context, to_numeric import io @@ -67,7 +67,7 @@ def to_png_b64(fig, dpi = 150): def prepare_data(test, traces, options): - std_out('Preparing data for plot') + logger.info('Preparing data for plot') # Dataframe to return df = DataFrame() @@ -79,9 +79,9 @@ def prepare_data(test, traces, options): if 'subplot' in traces[trace].keys(): n_subplots = max(n_subplots, traces[trace]['subplot']) else: - std_out (f'Trace {trace} not assigned to subplot. Skipping', 'WARNING') + logger.warning (f'Trace {trace} not assigned to subplot. Skipping') - std_out (f'Making {n_subplots} subplots') + logger.info (f'Making {n_subplots} subplots') # Generate list of subplots subplots = [[] for x in range(n_subplots)] @@ -90,52 +90,51 @@ def prepare_data(test, traces, options): for trace in traces.keys(): if 'subplot' not in traces[trace].keys(): - std_out(f'The trace {traces[trace]} was not placed in any subplot. Assuming subplot #1', 'WARNING') + logger.warning(f'The trace {traces[trace]} was not placed in any subplot. 
Assuming subplot #1')
             traces[trace]['subplot'] = 1

         ndevs = traces[trace]['devices']
         nchans = traces[trace]['channel']

         # Make them lists always
-        if ndevs == 'all': devices = list(test.devices.keys())
+        if ndevs == 'all': devices = [device.id for device in test.devices]
         elif type(ndevs) == str or type(ndevs) == int: devices = [ndevs]
         else: devices = ndevs

-        for device in devices:
-
-            ndev = str(device)
+        for ndev in devices:

             # Make them lists always
-            if nchans == 'all': channels = list(test.devices[ndev].readings.columns)
+            if nchans == 'all': channels = list(test.get_device(ndev).data.columns)
             elif type(nchans) == str: channels = [nchans]
             else: channels = nchans

             for channel in channels:
                 # Check if device is in columns
-                if channel not in test.devices[ndev].readings.columns:
-                    std_out(f'The device {ndev} does not contain {channel}. Ignoring', 'WARNING')
+                if channel not in test.get_device(ndev).data.columns:
+                    logger.warning(f'The device {ndev} does not contain {channel}. Ignoring')
                     continue

                 # Put channel in subplots
-                subplots[traces[trace]['subplot']-1].append(channel + '_' + ndev)
+                subplots[traces[trace]['subplot']-1].append(f'{channel}_{ndev}')

                 column_orig = [channel]
-                columns_add = [channel + '_' + ndev]
+                columns_add = [f'{channel}_{ndev}']

                 # Add filtering name to dfdev
                 if 'filter' in traces[trace]:
                     col_name = traces[trace]['filter']['col']

-                    if col_name not in test.devices[ndev].readings.columns:
-                        std_out(f'Column {col_name} not in dataframe. Ignoring filtering', 'WARNING')
+                    if col_name not in test.get_device(ndev).data.columns:
+                        logger.warning(f'Column {col_name} not in dataframe. Ignoring filtering')
                     else:
                         column_orig.append(col_name)
                         columns_add.append(col_name)

                 # Device dataframe
-                dfdev = DataFrame(test.devices[ndev].readings[column_orig].values,
+                dfdev = DataFrame(test.get_device(ndev).data[column_orig].values,
                                   columns = columns_add,
-                                  index = test.devices[ndev].readings.index)
+                                  index = test.get_device(ndev).data.index)

                 # Add filtering function
                 if 'filter' in traces[trace]:
@@ -154,7 +153,7 @@
                     elif relationship == '>':
                         dfdev.loc[dfdev[col_name]>value]
                     else:
-                        std_out(f"Not valid relationship. Valid options: '==', '<=', '>=', '<', '>'", 'ERROR')
+                        logger.error(f"Not valid relationship. Valid options: '==', '<=', '>=', '<', '>'")
                         continue
                     # Remove column for filtering from dfdev
                     dfdev.drop(columns=[col_name], inplace = True)
@@ -170,7 +169,7 @@
             nextras = list()
             for device in traces[trace]['devices']:
                 for channel in traces[trace]['channel']:
-                    nextras.append(channel + '_' + str(device))
+                    nextras.append(f'{channel}_{device}')

                 if extra == 'bands':

                     ubn = channel + f"-{trace}-{'UPPER-BAND'}"
@@ -207,11 +206,11 @@
     df = df.astype(float, errors='ignore')

     if df.empty:
-        std_out('Empty dataframe for plot', 'ERROR')
+        logger.error('Empty dataframe for plot')
         return None, None

     # Resample it
     if options['frequency'] is not None:
-        std_out(f"Resampling at {options['frequency']}", "INFO")
+        logger.info(f"Resampling at {options['frequency']}")

         if 'resample' in options:
@@ -229,7 +228,7 @@
         if options['clean_na'] == 'drop':
             df.dropna(axis = 0, how='any')

-    if df.empty: std_out('Dataframe for selected options is empty', 'WARNING')
+    if df.empty: logger.warning('Dataframe for selected options is empty')

     return df, subplots
diff --git a/scdata/test/plot/scatter_dispersion_grid.py b/scdata/test/plot/scatter_dispersion_grid.py
index 358958ca..3c7a6860 100644
--- a/scdata/test/plot/scatter_dispersion_grid.py
+++ b/scdata/test/plot/scatter_dispersion_grid.py
@@ -1,4 +1,4 @@
-from scdata.utils import std_out
+from scdata.utils import logger
 from scdata._config import config
 import matplotlib.pyplot as plt
 import matplotlib.cm as cm
@@ -14,7 +14,7 @@ def scatter_dispersion_grid(self, **kwargs):
     ----------
         channels: list
             Channel
-        options: dict 
+        options: dict
             Options including data processing prior to plot. Defaults in config._plot_def_opt
         formatting: dict
             Formatting dict. 
Defaults in config._ts_plot_def_fmt @@ -25,37 +25,37 @@ def scatter_dispersion_grid(self, **kwargs): if self.common_channels == []: self.get_common_channels() if 'channels' not in kwargs: - std_out('Using common channels') + logger.info('Using common channels') channels = self.common_channels else: channels = kwargs['channels'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._ts_plot_def_fmt['mpl'] else: - formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting']) + formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting']) if self.dispersion_df is None: - std_out('Perform dispersion analysis first!', 'ERROR') + logger.error('Perform dispersion analysis first!') return None if len(self.devices)>30: distribution = 'normal' - std_out('Using normal distribution') - std_out(f"Using limit for sigma confidence: {config._dispersion['limit_confidence_sigma']}") + logger.info('Using normal distribution') + logger.info(f"Using limit for sigma confidence: {config._dispersion['limit_confidence_sigma']}") else: distribution = 't-student' - std_out(f'Using t-student distribution.') - + logger.info(f'Using t-student distribution.') + # Number of subplots - number_of_subplots = len(channels) + number_of_subplots = len(channels) if number_of_subplots % 2 == 0: cols = 2 else: cols = 2 rows = int(ceil(number_of_subplots / cols)) @@ -70,17 +70,17 @@ def scatter_dispersion_grid(self, **kwargs): for channel in channels: if channel not in self.common_channels: - std_out(f'Channel {channel} not in common_channels') + logger.info(f'Channel {channel} not in common_channels') continue if channel in config._dispersion['ignore_channels']: - std_out(f'Channel {channel} ignored per config') - continue - + logger.info(f'Channel {channel} ignored per config') + continue + ax = figure.add_subplot(gs[n]) n += 1 - + dispersion_avg = self._dispersion_summary[channel] - + if distribution: limit_confidence = config._dispersion['limit_confidence_sigma'] @@ -100,35 +100,35 @@ def scatter_dispersion_grid(self, **kwargs): limit_confidence = t.interval(config._dispersion['t_confidence_level']/100.0, len(self.devices), loc=self.dispersion_df[channel + '_AVG'], scale=dispersion_avg) upper_bound = limit_confidence[1] - lower_bound = limit_confidence[0] - + lower_bound = limit_confidence[0] + for device in list(self.devices): color = cm.viridis.colors[round(list(self.devices).index(device)\ *len(cm.viridis.colors)/len(list(self.devices)))] - plt.scatter(self.dispersion_df[channel + '_AVG'], - self.dispersion_df[channel + '-' + device], + plt.scatter(self.dispersion_df[channel + '_AVG'], + self.dispersion_df[channel + '-' + device], label = device, alpha = 0.3, color = color) - - plt.plot([min(self.dispersion_df[channel + '_AVG']), max(self.dispersion_df[channel + '_AVG'])], - [min(self.dispersion_df[channel + '_AVG']), max(self.dispersion_df[channel + '_AVG'])], + + plt.plot([min(self.dispersion_df[channel + '_AVG']), max(self.dispersion_df[channel + '_AVG'])], + [min(self.dispersion_df[channel + '_AVG']), max(self.dispersion_df[channel + '_AVG'])], 'r', label = 'AVG', alpha = 0.9, linewidth = 1.5) plt.plot([min(self.dispersion_df[channel + '_AVG']), max(self.dispersion_df[channel + '_AVG'])], - [min(lower_bound), 
max(lower_bound)], + [min(lower_bound), max(lower_bound)], 'g', label = 'AVG ± σSTD', alpha = 0.8, linewidth = 1.5) - + plt.plot([min(self.dispersion_df[channel + '_AVG']), max(self.dispersion_df[channel + '_AVG'])], - [min(upper_bound), - max(upper_bound)], + [min(upper_bound), + max(upper_bound)], 'g', alpha = 0.8, linewidth = 1.5) - + plt.legend(bbox_to_anchor=(1, 0.4), fancybox=True, loc='center left', ncol = 2) plt.xlabel('Refererence (avg. of test)') plt.ylabel('Individual device (-)') plt.title(f"Dispersion analysis for {channel} sensor - STD = {round(self.dispersion_df[channel + '_STD'].mean(), 2)}") plt.grid() - + plt.subplots_adjust(top = formatting['suptitle_factor']); if options['show']: plt.show() diff --git a/scdata/test/plot/scatter_iplot.py b/scdata/test/plot/scatter_iplot.py index 8f216e00..bd19ab90 100644 --- a/scdata/test/plot/scatter_iplot.py +++ b/scdata/test/plot/scatter_iplot.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from .scatter_plot import scatter_plot from scdata._config import config from plotly.io import renderers @@ -16,9 +16,9 @@ def scatter_iplot(self, **kwargs): "channel": "EXT_PM_A_1"}, "2": {"devices": "10751", "channel": "EXT_PM_A_10" - } - } - options: dict + } + } + options: dict Options including data processing prior to plot. Defaults in config._plot_def_opt formatting: dict Name of auxiliary electrode found in dataframe. Defaults in config._corr_plot_def_fmt @@ -26,24 +26,23 @@ def scatter_iplot(self, **kwargs): ------- Plotly figure """ - std_out ('Not yet working', 'ERROR') - return None + raise NotImplementedError if config.framework == 'jupyterlab': renderers.default = config.framework - if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + if 'traces' not in kwargs: + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._scatter_plot_def_fmt['plotly'] else: formatting = dict_fmerge(config._scatter_plot_def_fmt['plotly'], kwargs['formatting']) @@ -51,13 +50,13 @@ def scatter_iplot(self, **kwargs): # Set options to not show in scatter_plot toshow = options['show'] options['show'] = False - + # Make sns plot mfig = scatter_plot(self, traces = traces, options = options, formatting = formatting) options['show'] = toshow - + pfig = tls.mpl_to_plotly(mfig); - if options['show']: pfig.show(); + if options['show']: pfig.show(); return pfig \ No newline at end of file diff --git a/scdata/test/plot/scatter_plot.py b/scdata/test/plot/scatter_plot.py index 0b0cc134..cd02f1e4 100644 --- a/scdata/test/plot/scatter_plot.py +++ b/scdata/test/plot/scatter_plot.py @@ -2,7 +2,7 @@ from matplotlib import rcParams from matplotlib import style from seaborn import set_palette, regplot, scatterplot, relplot -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data, colors from numpy import array @@ -37,23 +37,24 @@ def scatter_plot(self, **kwargs): Matplotlib figure and axes """ - if config.framework == 'jupyterlab': plt.ioff(); - plt.clf(); + if config.framework == 'jupyterlab': + plt.ioff() + plt.clf() if 'traces' not in kwargs: 
- std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._scatter_plot_def_fmt['mpl'] else: formatting = dict_fmerge(config._scatter_plot_def_fmt['mpl'], kwargs['formatting']) @@ -173,7 +174,7 @@ def scatter_plot(self, **kwargs): try: ax.set_ylabel(formatting['ylabel']); except: - std_out (f'y_label for subplot {subplots.index(i)} not set', 'WARNING') + logger.warning (f'y_label for subplot {subplots.index(i)} not set') ax.set_ylabel('') pass else: @@ -183,7 +184,7 @@ def scatter_plot(self, **kwargs): try: ax.set_xlabel(formatting['xlabel']); except: - std_out (f'x_label for subplot {subplots.index(i)} not set', 'WARNING') + logger.warning (f'x_label for subplot {subplots.index(i)} not set') ax.set_xlabel('') pass else: @@ -208,7 +209,7 @@ def scatter_plot(self, **kwargs): try: ax.set_ylim(formatting['yrange']); except: - std_out (f'yrange for subplot {subplots.index(i)} not set', 'WARNING') + logger.warning (f'yrange for subplot {subplots.index(i)} not set') pass elif formatting['sharey']: ax.set_ylim(min([yl[0] for yl in y_axes]), max([yl[1] for yl in y_axes])) @@ -218,7 +219,7 @@ def scatter_plot(self, **kwargs): try: ax.set_xlim(formatting['xrange']); except: - std_out (f'xrange for subplot {subplots.index(i)} not set', 'WARNING') + logger.warning (f'xrange for subplot {subplots.index(i)} not set') pass elif formatting['sharex']: ax.set_xlim(min([xl[0] for xl in x_axes]), max([xl[1] for xl in x_axes])) diff --git a/scdata/test/plot/ts_dendrogram.py b/scdata/test/plot/ts_dendrogram.py index 250f839c..a68bdb85 100644 --- a/scdata/test/plot/ts_dendrogram.py +++ b/scdata/test/plot/ts_dendrogram.py @@ -1,6 +1,6 @@ from scipy.cluster import hierarchy as hc from pandas import DataFrame -from scdata.utils import std_out, dict_fmerge, clean +from scdata.utils import logger, dict_fmerge, clean from scdata._config import config import matplotlib.pyplot as plt from matplotlib import rcParams @@ -8,7 +8,7 @@ def ts_dendrogram(self, **kwargs): """ - Plots dendrogram of devices and channels in matplotlib plot. Takes all the channels + Plots dendrogram of devices and channels in matplotlib plot. 
Takes all the channels in channels that are in the test `devices` Parameters ---------- @@ -30,27 +30,27 @@ def ts_dendrogram(self, **kwargs): Returns ------- Dendrogram matrix, shows plot - """ + """ if 'metric' not in kwargs: metric = 'correlation' else: metric = kwargs['metric'] - + if 'method' not in kwargs: method = 'single' else: method = kwargs['method'] - + if 'devices' not in kwargs: devices = list(self.devices.keys()) else: devices = kwargs['devices'] - + if 'channels' not in kwargs: channels = 'all' else: channels = kwargs['channels'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._dendrogram_def_fmt['mpl'] else: formatting = dict_fmerge(config._dendrogram_def_fmt['mpl'], kwargs['formatting']) @@ -58,32 +58,32 @@ def ts_dendrogram(self, **kwargs): # Style if formatting['style'] is not None: style.use(formatting['style']) else: style.use(config._plot_style) - + # Palette if formatting['palette'] is not None: set_palette(formatting['palette']) # Size sanity check - if formatting['width'] > 50: + if formatting['width'] > 50: - std_out('Reducing width to 12') + logger.info('Reducing width to 12') formatting['width'] = 12 - - if formatting['height'] > 50: - std_out('Reducing height to 10') - formatting['height'] = 10 + if formatting['height'] > 50: + + logger.info('Reducing height to 10') + formatting['height'] = 10 # Font size if formatting['fontsize'] is not None: rcParams.update({'font.size': formatting['fontsize']}); - + df = DataFrame() - + for device in devices: dfd = self.devices[device].readings.copy() dfd = dfd.resample(options['frequency']).mean() - - if channels != 'all': + + if channels != 'all': for channel in channels: if channel in dfd.columns: df = df.append(dfd[channel].rename(device+'_'+channel)) else: df = df.append(dfd) @@ -91,7 +91,7 @@ def ts_dendrogram(self, **kwargs): if options['clean_na'] is not None: if options['clean_na'] == 'drop': df.dropna(axis = 1, inplace=True) if options['clean_na'] == 'fill': df = df.fillna(method='ffill') - + Z = hc.linkage(df, method = method, metric = metric) # Plot dendogram @@ -106,7 +106,7 @@ def ts_dendrogram(self, **kwargs): leaf_font_size=formatting['fontsize'], # font size for the x axis labels labels=df.index ) - + plt.show() - + return Z diff --git a/scdata/test/plot/ts_dispersion_grid.py b/scdata/test/plot/ts_dispersion_grid.py index 22cae06c..18b418f1 100644 --- a/scdata/test/plot/ts_dispersion_grid.py +++ b/scdata/test/plot/ts_dispersion_grid.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out +from scdata.utils import logger from scdata._config import config import matplotlib.pyplot as plt import matplotlib.cm as cm @@ -14,7 +14,7 @@ def ts_dispersion_grid(self, **kwargs): ---------- channels: list Channel - options: dict + options: dict Options including data processing prior to plot. Defaults in config._plot_def_opt formatting: dict Formatting dict. 
Defaults in config._ts_plot_def_fmt
@@ -25,37 +25,37 @@
     if self.common_channels == []: self.get_common_channels()

     if 'channels' not in kwargs:
-        std_out('Using common channels')
+        logger.info('Using common channels')
         channels = self.common_channels
     else: channels = kwargs['channels']

     if 'options' not in kwargs:
-        std_out('Using default options')
+        logger.info('Using default options')
         options = config._plot_def_opt
     else: options = dict_fmerge(config._plot_def_opt, kwargs['options'])

     if 'formatting' not in kwargs:
-        std_out('Using default formatting')
+        logger.info('Using default formatting')
         formatting = config._ts_plot_def_fmt['mpl']
     else:
-        formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting']) 
+        formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting'])

     if self.dispersion_df is None:
-        std_out('Perform dispersion analysis first!', 'ERROR')
+        logger.error('Perform dispersion analysis first!')
         return None

     if len(self.devices)>30:
         distribution = 'normal'
-        std_out('Using normal distribution')
-        std_out(f"Using limit for sigma confidence: {config._dispersion['limit_confidence_sigma']}")
+        logger.info('Using normal distribution')
+        logger.info(f"Using limit for sigma confidence: {config._dispersion['limit_confidence_sigma']}")
     else:
         distribution = 't-student'
-        std_out(f'Using t-student distribution.')
-
+        logger.info(f'Using t-student distribution.')
+
     # Number of subplots
-    number_of_subplots = len(channels) 
+    number_of_subplots = len(channels)
     if number_of_subplots % 2 == 0: cols = 2
     else: cols = 2
     rows = int(ceil(number_of_subplots / cols))
@@ -70,17 +70,17 @@
     for channel in channels:

         if channel not in self.common_channels:
-            std_out(f'Channel {channel} not in common_channels')
+            logger.info(f'Channel {channel} not in common_channels')
             continue
         if channel in config._dispersion['ignore_channels']:
-            std_out(f'Channel {channel} ignored per config')
-            continue
-
+            logger.info(f'Channel {channel} ignored per config')
+            continue
+
         ax = figure.add_subplot(gs[n])
         n += 1
-
+
         dispersion_avg = self._dispersion_summary[channel]
-
+
         if distribution:

             limit_confidence = config._dispersion['limit_confidence_sigma']
@@ -100,23 +100,23 @@
             limit_confidence = t.interval(config._dispersion['t_confidence_level']/100.0, len(self.devices),
                                           loc=self.dispersion_df[channel + '_AVG'], scale=dispersion_avg)
             upper_bound = limit_confidence[1]
-            lower_bound = limit_confidence[0] 
-
+            lower_bound = limit_confidence[0]
+
         for device in list(self.devices):
             color = cm.viridis.colors[round(list(self.devices).index(device)\
                     *len(cm.viridis.colors)/len(list(self.devices)))]
-            plt.plot(self.dispersion_df.index, 
-                     self.dispersion_df[channel + '-' + device], 
+            plt.plot(self.dispersion_df.index,
+                     self.dispersion_df[channel + '-' + device],
                      label = device, alpha = 0.3, color = color)
-
+
             plt.plot(self.dispersion_df.index,
                      self.dispersion_df[channel + '_AVG'],
                      'r', label = 'AVG', alpha = 0.9, linewidth = 1.5)

-            plt.plot(self.dispersion_df.index, lower_bound, 
+            plt.plot(self.dispersion_df.index, lower_bound,
                      'g', label = 'AVG ± σSTD', alpha = 0.8, linewidth = 1.5)
-            plt.plot(self.dispersion_df.index, upper_bound, 
+            plt.plot(self.dispersion_df.index, upper_bound,
                      'g', alpha = 0.8, linewidth = 1.5)

         plt.legend(bbox_to_anchor=(1, 0.5), fancybox=True, loc='center left', ncol = 2)
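Note on the logger migration: every hunk in this commit swaps the old std_out(message, level) helper for a module-level logger imported from scdata.utils. The reworked scdata/utils/out.py appears in the diffstat above but its hunk is not shown in this excerpt, so the following is only a minimal sketch of how such a shared logger could be wired up. The get_logger helper is a hypothetical name; Config.log_level is the attribute actually added in scdata/_config/config.py in this commit.

    import logging
    import sys

    def get_logger(name='scdata', level=logging.INFO):
        # Hypothetical helper: one shared logger replaces std_out's per-call level
        # strings, e.g. std_out(msg, 'ERROR') becomes logger.error(msg).
        logger = logging.getLogger(name)
        if not logger.handlers:
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
            logger.addHandler(handler)
        logger.setLevel(level)
        return logger

    logger = get_logger()
    logger.info('Using default options')                # was std_out('Using default options')
    logger.error('Perform dispersion analysis first!')  # was std_out(..., 'ERROR')

With this shape, verbosity becomes a single switch (Config.log_level) instead of the old 'QUIET'/'NORMAL'/'DEBUG' strings held in _out_level.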
diff --git a/scdata/test/plot/ts_dispersion_plot.py b/scdata/test/plot/ts_dispersion_plot.py
index ca3095d2..41d761c1 100644
--- a/scdata/test/plot/ts_dispersion_plot.py
+++ b/scdata/test/plot/ts_dispersion_plot.py
@@ -1,4 +1,4 @@
-from scdata.utils import std_out
+from scdata.utils import logger
 from scdata._config import config
 import matplotlib.pyplot as plt
 import matplotlib.colors
@@ -15,7 +15,7 @@ def ts_dispersion_plot(self, **kwargs):
     ----------
         channel: string
             Channel
-        options: dict 
+        options: dict
             Options including data processing prior to plot. Defaults in config._plot_def_opt
         formatting: dict
             Formatting dict. Defaults in config._ts_plot_def_fmt
@@ -25,62 +25,62 @@
     '''

     if 'channel' not in kwargs:
-        std_out('Needs at least one channel to plot')
+        logger.info('Needs at least one channel to plot')
         return None
     else: channel = kwargs['channel']

     if 'options' not in kwargs:
-        std_out('Using default options')
+        logger.info('Using default options')
         options = config._plot_def_opt
     else: options = dict_fmerge(config._plot_def_opt, kwargs['options'])

     if 'formatting' not in kwargs:
-        std_out('Using default formatting')
+        logger.info('Using default formatting')
         formatting = config._ts_plot_def_fmt['mpl']
     else:
-        formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting']) 
+        formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting'])

     if self.dispersion_df is None:
-        std_out('Perform dispersion analysis first!', 'ERROR')
+        logger.error('Perform dispersion analysis first!')
         return None

     if self.common_channels == []: self.get_common_channels()
     if channel not in self.common_channels:
-        std_out(f'Channel {channel} not in common_channels')
+        logger.info(f'Channel {channel} not in common_channels')
         return None
     if channel in config._dispersion['ignore_channels']:
-        std_out(f'Channel {channel} ignored per config')
+        logger.info(f'Channel {channel} ignored per config')
         return None

     if len(self.devices)>config._dispersion['nt_threshold']:
         distribution = 'normal'
-        std_out('Using normal distribution')
-        std_out(f"Using limit for sigma confidence: {config._dispersion['limit_confidence_sigma']}")
+        logger.info('Using normal distribution')
+        logger.info(f"Using limit for sigma confidence: {config._dispersion['limit_confidence_sigma']}")
     else:
         distribution = 't-student'
-        std_out(f'Using t-student distribution.')
+        logger.info(f'Using t-student distribution.')

     # Size sanity check
-    if formatting['width'] > 50: 
-        std_out('Reducing width to 12')
+    if formatting['width'] > 50:
+        logger.info('Reducing width to 12')
         formatting['width'] = 12
     if formatting['height'] > 50:
-        std_out('Reducing height to 10')
-        formatting['height'] = 10 
+        logger.info('Reducing height to 10')
+        formatting['height'] = 10

     # Make subplot
-    figure, (ax_tbr, ax_ok) = plt.subplots(nrows = 2, 
+    figure, (ax_tbr, ax_ok) = plt.subplots(nrows = 2,
                                            sharex = formatting['sharex'],
                                            figsize = (formatting['width'],
                                                       formatting['height'])
                                            );

     # cmap = plt.cm.Reds
-    norm = matplotlib.colors.Normalize(vmin=0, 
+    norm = matplotlib.colors.Normalize(vmin=0,
                                        vmax=config._dispersion['limit_errors']/2)

     ch_index = self.common_channels.index(channel)+1
-
+
     # Style
     if formatting['style'] is not None: style.use(formatting['style'])
     else: style.use(config._plot_style)
@@ -106,13 +106,13 @@
             lower_bound = self.dispersion_df[channel + '_AVG']\
                           - abs(limit_confidence * dispersion_avg)
         else:
-            limit_confidence = 
t.interval(config._dispersion['t_confidence_level']/100.0, len(self.devices), loc=self.dispersion_df[channel + '_AVG'], scale=dispersion_avg) upper_bound = limit_confidence[1] lower_bound = limit_confidence[0] for device in self.devices: - ncol = channel + '-' + device + ncol = channel + '-' + device if ncol in self.dispersion_df.columns: # Count how many times we go above the upper bound or below the lower one @@ -128,19 +128,19 @@ def ts_dispersion_plot(self, **kwargs): max_number_errors = len(count_problems) if number_errors/max_number_errors > config._dispersion['limit_errors']/100: - std_out (f"Device {device} out of {config._dispersion['limit_errors']}% limit\ - - {np.round(number_errors/max_number_errors*100, 1)}% out", 'WARNING') + logger.warning (f"Device {device} out of {config._dispersion['limit_errors']}% limit\ + - {np.round(number_errors/max_number_errors*100, 1)}% out") alpha = 1 - ax_tbr.plot(self.dispersion_df.index, - self.dispersion_df[ncol], + ax_tbr.plot(self.dispersion_df.index, + self.dispersion_df[ncol], color = 'r', label = device, alpha = alpha) else: alpha = 1 color = 'g' - ax_ok.plot(self.dispersion_df.index, - self.dispersion_df[ncol], - color = color, + ax_ok.plot(self.dispersion_df.index, + self.dispersion_df[ncol], + color = color, label = device, alpha = alpha) # Add upper and low bound bound to subplot 1 diff --git a/scdata/test/plot/ts_dispersion_uplot.py b/scdata/test/plot/ts_dispersion_uplot.py index ccc2eac6..5ed167e5 100644 --- a/scdata/test/plot/ts_dispersion_uplot.py +++ b/scdata/test/plot/ts_dispersion_uplot.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import colors from scipy.stats import t @@ -58,19 +58,19 @@ def ts_dispersion_uplot(self, **kwargs): ''' if 'channel' not in kwargs: - std_out('Needs at least one channel to plot') + logger.info('Needs at least one channel to plot') return None else: channel = kwargs['channel'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._ts_plot_def_fmt['uplot'] else: formatting = dict_fmerge(config._ts_plot_def_fmt['uplot'], @@ -78,36 +78,36 @@ def ts_dispersion_uplot(self, **kwargs): # Size sanity check if formatting['width'] < 100: - std_out('Setting width to 800') + logger.info('Setting width to 800') formatting['width'] = 800 if formatting['height'] < 100: - std_out('Reducing height to 600') + logger.info('Reducing height to 600') formatting['height'] = 600 if 'html' not in options: options['html'] = False if self.dispersion_df is None: - std_out('Perform dispersion analysis first!', 'ERROR') + logger.error('Perform dispersion analysis first!') return None if self.common_channels == []: self.get_common_channels() if channel not in self.common_channels: - std_out(f'Channel {channel} not in common_channels') + logger.info(f'Channel {channel} not in common_channels') return None if channel in config._dispersion['ignore_channels']: - std_out(f'Channel {channel} ignored per config') + logger.info(f'Channel {channel} ignored per config') return None if len(self.devices)>config._dispersion['nt_threshold']: distribution = 'normal' - std_out('Using normal distribution') - std_out(f"Using limit for sigma confidence:\ + 
logger.info('Using normal distribution') + logger.info(f"Using limit for sigma confidence:\ {config._dispersion['limit_confidence_sigma']}") else: distribution = 't-student' - std_out(f'Using t-student distribution.') + logger.info(f'Using t-student distribution.') ch_index = self.common_channels.index(channel)+1 total_number = len(self.common_channels) @@ -169,8 +169,8 @@ def ts_dispersion_uplot(self, **kwargs): # TBR if number_errors/max_number_errors > config._dispersion['limit_errors']/100: - std_out (f"Device {device} out of {config._dispersion['limit_errors']}% limit\ - - {np.round(number_errors/max_number_errors*100, 1)}% out", 'WARNING') + logger.warning (f"Device {device} out of {config._dispersion['limit_errors']}% limit\ + - {np.round(number_errors/max_number_errors*100, 1)}% out") subplots[0].append(ncol) #OK else: diff --git a/scdata/test/plot/ts_iplot.py b/scdata/test/plot/ts_iplot.py index dddf66e3..744d6356 100644 --- a/scdata/test/plot/ts_iplot.py +++ b/scdata/test/plot/ts_iplot.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data @@ -36,19 +36,19 @@ def ts_iplot(self, **kwargs): if config.framework == 'jupyterlab': renderers.default = config.framework if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options', 'WARNING') + logger.warning('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting', 'WARNING') + logger.warning('Using default formatting') formatting = config._ts_plot_def_fmt['plotly'] else: formatting = dict_fmerge(config._ts_plot_def_fmt['plotly'], kwargs['formatting']) @@ -64,10 +64,10 @@ def ts_iplot(self, **kwargs): # Size sanity check if formatting['width'] < 100: - std_out('Setting width to 800') + logger.info('Setting width to 800') formatting['width'] = 800 if formatting['height'] < 100: - std_out('Reducing height to 600') + logger.info('Reducing height to 600') formatting['height'] = 600 figure = make_subplots(rows = n_subplots, cols=1, diff --git a/scdata/test/plot/ts_plot.py b/scdata/test/plot/ts_plot.py index d194484c..ab05ea2c 100644 --- a/scdata/test/plot/ts_plot.py +++ b/scdata/test/plot/ts_plot.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data from pandas import to_datetime @@ -34,23 +34,24 @@ def ts_plot(self, **kwargs): Matplotlib figure """ - if config.framework == 'jupyterlab': plt.ioff(); - plt.clf(); + if config.framework == 'jupyterlab': + plt.ioff() + plt.clf() if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._ts_plot_def_fmt['mpl'] else: formatting = dict_fmerge(config._ts_plot_def_fmt['mpl'], kwargs['formatting']) @@ -77,10 +78,10 @@ def ts_plot(self, **kwargs): # Size 
sanity check if formatting['width'] > 50: - std_out('Reducing width to 12') + logger.info('Reducing width to 12') formatting['width'] = 12 if formatting['height'] > 50: - std_out('Reducing height to 10') + logger.info('Reducing height to 10') formatting['height'] = 10 # Plot diff --git a/scdata/test/plot/ts_scatter.py b/scdata/test/plot/ts_scatter.py index df135e7f..bd01a7a6 100644 --- a/scdata/test/plot/ts_scatter.py +++ b/scdata/test/plot/ts_scatter.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data @@ -36,23 +36,24 @@ def ts_scatter(self, **kwargs): Matplotlib figure containing timeseries and scatter plot with correlation coefficients on it """ - if config.framework == 'jupyterlab': plt.ioff(); - plt.clf(); + if config.framework == 'jupyterlab': + plt.ioff() + plt.clf() if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._ts_scatter_def_fmt['mpl'] else: formatting = dict_fmerge(config._ts_scatter_def_fmt['mpl'], kwargs['formatting']) @@ -96,9 +97,9 @@ def ts_scatter(self, **kwargs): pearsonCorr = list(df.corr('pearson')[list(df.columns)[0]])[-1] rmse = sqrt(mean_squared_error(df[feature_trace].fillna(0), df[ref_trace].fillna(0))) - std_out (f'Pearson correlation coefficient: {pearsonCorr}') - std_out (f'Coefficient of determination R²: {pearsonCorr*pearsonCorr}') - std_out (f'RMSE: {rmse}') + logger.info (f'Pearson correlation coefficient: {pearsonCorr}') + logger.info (f'Coefficient of determination R²: {pearsonCorr*pearsonCorr}') + logger.info (f'RMSE: {rmse}') # Time Series plot ax1.plot(df.index, df[feature_trace], color = 'g', label = feature_trace, linewidth = 1, alpha = 0.9) diff --git a/scdata/test/plot/ts_uplot.py b/scdata/test/plot/ts_uplot.py index c52a5ea9..c923e060 100644 --- a/scdata/test/plot/ts_uplot.py +++ b/scdata/test/plot/ts_uplot.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge +from scdata.utils import logger, dict_fmerge from scdata._config import config from .plot_tools import prepare_data, colors @@ -68,29 +68,29 @@ def ts_uplot(self, **kwargs): ''' if 'traces' not in kwargs: - std_out('No traces defined', 'ERROR') + logger.error('No traces defined') return None else: traces = kwargs['traces'] if 'options' not in kwargs: - std_out('Using default options') + logger.info('Using default options') options = config._plot_def_opt else: options = dict_fmerge(config._plot_def_opt, kwargs['options']) if 'formatting' not in kwargs: - std_out('Using default formatting') + logger.info('Using default formatting') formatting = config._ts_plot_def_fmt['uplot'] else: formatting = dict_fmerge(config._ts_plot_def_fmt['uplot'], kwargs['formatting']) # Size sanity check if formatting['width'] < 100: - std_out('Setting width to 800') + logger.info('Setting width to 800') formatting['width'] = 800 if formatting['height'] < 100: - std_out('Reducing height to 600') + logger.info('Reducing height to 600') formatting['height'] = 600 if 'html' not in options: diff --git a/scdata/utils/__init__.py 
b/scdata/utils/__init__.py index c1dff251..7b92654e 100644 --- a/scdata/utils/__init__.py +++ b/scdata/utils/__init__.py @@ -1,4 +1,4 @@ -from .out import std_out +from .out import logger from .date import localise_date, find_dates from .units import get_units_convf from .dictmerge import dict_fmerge diff --git a/scdata/utils/lazy.py b/scdata/utils/lazy.py index a7208951..7273a7f2 100644 --- a/scdata/utils/lazy.py +++ b/scdata/utils/lazy.py @@ -1,5 +1,5 @@ import sys -from .out import std_out +from .out import logger class LazyCallable(object): ''' @@ -11,7 +11,7 @@ def __init__(self, name): self.f = None def __call__(self, *a, **k): if self.f is None: - std_out(f"Loading {self.n.rsplit('.', 1)[1]} from {self.n.rsplit('.', 1)[0]}") + logger.info(f"Loading {self.n.rsplit('.', 1)[1]} from {self.n.rsplit('.', 1)[0]}") modn, funcn = self.n.rsplit('.', 1) if modn not in sys.modules: __import__(modn) diff --git a/scdata/utils/location.py b/scdata/utils/location.py index 576ed356..1f07fbd0 100644 --- a/scdata/utils/location.py +++ b/scdata/utils/location.py @@ -1,6 +1,6 @@ from requests import get from pandas import json_normalize -from scdata.utils import std_out +from scdata.utils import logger from scdata._config import config def get_elevation(_lat = None, _long = None): @@ -12,7 +12,7 @@ def get_elevation(_lat = None, _long = None): https://stackoverflow.com/questions/19513212/can-i-get-the-altitude-with-geopy-in-python-with-longitude-latitude ''' if _lat is None or _long is None: return None - + query = ('https://api.open-elevation.com/api/v1/lookup' f'?locations={_lat},{_long}') @@ -21,7 +21,7 @@ def get_elevation(_lat = None, _long = None): try: r = get(query, timeout = config._timeout) except: - std_out(f'Cannot get altitude from {query}') + logger.info(f'Cannot get altitude from {query}') error = True pass diff --git a/scdata/utils/out.py b/scdata/utils/out.py index 99f71d32..68c2cb67 100644 --- a/scdata/utils/out.py +++ b/scdata/utils/out.py @@ -3,32 +3,37 @@ from datetime import datetime import sys -def block_print(): - sys.stdout = open(os.devnull, 'w') +import logging -def enable_print(): - sys.stdout = sys.__stdout__ +class CustomLoggingFormatter(logging.Formatter): -def std_out(msg, mtype = None, force = False): - out_level = config._out_level - if config._timestamp == True: - stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - else: - stamp = '' - # Output levels: - # 'QUIET': nothing, - # 'NORMAL': warn, err - # 'DEBUG': info, warn, err, success - if force == True: priority = 2 - elif out_level == 'QUIET': priority = 0 - elif out_level == 'NORMAL': priority = 1 - elif out_level == 'DEBUG': priority = 2 + grey = "\x1b[38;20m" + yellow = "\x1b[33;20m" + red = "\x1b[31;20m" + bold_red = "\x1b[31;1m" + reset = "\x1b[0m" + format_min = "[%(asctime)s] - %(name)s - %(levelname)s - %(message)s" + format_deb = "[%(asctime)s] - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" - if mtype is None and priority>1: - print(f'[{stamp}] - ' + '[INFO] ' + msg) - elif mtype == 'SUCCESS' and priority>0: - print(f'[{stamp}] - ' + colored('[SUCCESS] ', 'green') + msg) - elif mtype == 'WARNING' and priority>0: - print(f'[{stamp}] - ' + colored('[WARNING] ', 'yellow') + msg) - elif mtype == 'ERROR' and priority>0: - print(f'[{stamp}] - ' + colored('[ERROR] ', 'red') + msg) \ No newline at end of file + FORMATS = { + logging.DEBUG: grey + format_min + reset, + logging.INFO: grey + format_min + reset, + logging.WARNING: yellow + format_min + reset, + logging.ERROR: red + format_deb + 
reset, + logging.CRITICAL: bold_red + format_deb + reset + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno) + formatter = logging.Formatter(log_fmt) + return formatter.format(record) + +logger = logging.getLogger('scdata') +logger.setLevel(config.log_level) +ch = logging.StreamHandler(sys.stdout) +ch.setLevel(config.log_level) +ch.setFormatter(CustomLoggingFormatter()) +logger.addHandler(ch) + +def set_logger_level(level=logging.DEBUG): + logger.setLevel(level) diff --git a/scdata/utils/units.py b/scdata/utils/units.py index faf4219f..9fa4af14 100644 --- a/scdata/utils/units.py +++ b/scdata/utils/units.py @@ -1,5 +1,5 @@ from re import search -from scdata.utils.out import std_out +from scdata.utils import logger from scdata._config import config def get_units_convf(sensor, from_units): @@ -18,7 +18,7 @@ def get_units_convf(sensor, from_units): factor (float) factor = unit_convertion_factor/molecular_weight Note: - This would need to be changed if all pollutants were to be expresed in + This would need to be changed if all pollutants were to be expressed in mass units, instead of ppm/b """ @@ -28,32 +28,31 @@ def get_units_convf(sensor, from_units): if not (search(channel, sensor)): continue # Molecular weight in case of pollutants for pollutant in config._molecular_weights.keys(): - if search(channel, pollutant): + if search(channel, pollutant): molecular_weight = config._molecular_weights[pollutant] break else: molecular_weight = 1 - + # Check if channel is in look-up table - if config._channel_lut[channel] != from_units and from_units != "": - std_out(f"Converting units for {sensor}. From {from_units} to {config._channel_lut[channel]}") + if config._channel_lut[channel] != from_units and from_units != "": + logger.info(f"Converting units for {sensor}. From {from_units} to {config._channel_lut[channel]}") for unit in config._unit_convertion_lut: # Get units - if unit[0] == from_units and unit[1] == config._channel_lut[channel]: + if unit[0] == from_units and unit[1] == config._channel_lut[channel]: factor = unit[2] requires_conc = unit[3] break - elif unit[1] == from_units and unit[0] == config._channel_lut[channel]: + elif unit[1] == from_units and unit[0] == config._channel_lut[channel]: factor = 1/unit[2] requires_conc = unit[3] break if requires_conc: rfactor = factor/molecular_weight else: rfactor = factor - std_out(f"Factor: {rfactor}") - else: - std_out(f"No units conversion needed for {sensor}") + else: + logger.info(f"No units conversion needed for {sensor}. Same units") if from_units == "": - std_out("Empty units in blueprint is placeholder for keep") + logger.info("Empty units in blueprint is placeholder for keep") rfactor = 1 if rfactor != 1: break - + return rfactor diff --git a/scdata/utils/zenodo.py b/scdata/utils/zenodo.py index b3614200..475aab76 100644 --- a/scdata/utils/zenodo.py +++ b/scdata/utils/zenodo.py @@ -1,7 +1,7 @@ ''' Implementation of zenodo export ''' from scdata._config import config -from scdata.utils import std_out, get_tests_log +from scdata.utils import logger, get_tests_log from scdata.utils.report import include_footer from scdata import Test import json, yaml @@ -14,8 +14,8 @@ def zenodo_upload(upload_descritor, sandbox = True, dry_run = True): This section uses the code inspired by this repo https://github.com/darvasd/upload-to-zenodo Uploads a series of tests to zenodo.org using a template in /zenodo_templates and the descriptor file in data/uploads. 
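As a quick usage sketch of the logging setup introduced in scdata/utils/out.py above (the module path and a config.log_level value are assumed here for illustration; they are not fixed by this patch):

    import logging
    from scdata.utils.out import logger, set_logger_level

    # Emits something like: [2024-04-07 18:58:19,123] - scdata - INFO - loading data
    logger.info('loading data')

    # Note: set_logger_level() only adjusts the logger itself; the StreamHandler
    # created at import time keeps the level it was given from config.log_level
    set_logger_level(logging.DEBUG)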
It will need a ZENODO_TOKEN environment variable to work - The submission needs an additional "Publish" step. - This can also be done from a script, but to be on the safe side, it is not included. + The submission needs an additional "Publish" step. + This can also be done from a script, but to be on the safe side, it is not included. (The attached file cannot be changed after publication) Parameters ---------- @@ -37,7 +37,7 @@ def zenodo_upload(upload_descritor, sandbox = True, dry_run = True): True if all data is uploaded, False otherwise ''' - + def fill_template(individual_descriptor, descriptor_file_name, upload_type = 'dataset'): # Open base template with all keys @@ -59,8 +59,8 @@ def fill_template(individual_descriptor, descriptor_file_name, upload_type = 'da with open (join(config.paths['uploads'], descriptor_file_name), 'w') as descriptor_json: json.dump(filled_template, descriptor_json, ensure_ascii=True) - std_out(f'Created descriptor file for {descriptor_file_name}', 'SUCCESS') - + logger.info(f'Created descriptor file for {descriptor_file_name}') + return json.dumps(filled_template) def get_submission_id(metadata, base_url): @@ -70,8 +70,8 @@ def get_submission_id(metadata, base_url): response = post(url, params={'access_token': environ['ZENODO_TOKEN']}, data = metadata, headers = headers) if response.status_code > 210: - std_out("Error happened during submission, status code: " + str(response.status_code), 'ERROR') - std_out(response.json()['message'], 'ERROR') + logger.error("Error happened during submission, status code: " + str(response.status_code)) + logger.error(response.json()['message']) return None # Get the submission ID @@ -81,20 +81,20 @@ def get_submission_id(metadata, base_url): def upload_file(url, upload_metadata, files): response = post(url, params={'access_token': environ['ZENODO_TOKEN']}, data = upload_metadata, files=files) - return response.status_code + return response.status_code + + logger.info(f'Uploading {upload_descritor} to zenodo') - std_out(f'Uploading {upload_descritor} to zenodo') + if dry_run: logger.warning(f'Dry run. Verify output before setting dry_run to False') - if dry_run: std_out(f'Dry run. Verify output before setting dry_run to False', 'WARNING') - # Sandbox or not - if sandbox: - std_out(f'Using sandbox. Verify output before setting sandbox to False', 'WARNING') + if sandbox: + logger.warning(f'Using sandbox. 
Verify output before setting sandbox to False') base_url = config.zenodo_sandbox_base_url else: base_url = config.zenodo_real_base_url - + if '.yaml' not in upload_descritor: upload_descritor = upload_descritor + '.yaml' - + with open (join(config.paths['uploads'], upload_descritor), 'r') as descriptor_file: descriptor = yaml.load(descriptor_file, Loader = yaml.SafeLoader) @@ -102,7 +102,7 @@ def upload_file(url, upload_metadata, files): # Set options for processed and raw uploads stage_list = ['base'] - + if 'options' in descriptor[key].keys(): options = descriptor[key]['options'] else: options = {'include_processed_data': False, 'include_footer_doi': True, 'include_td_html': False} @@ -110,46 +110,46 @@ def upload_file(url, upload_metadata, files): if 'include_processed_data' not in options: options['include_processed_data'] = False if 'include_footer_doi' not in options: options['include_footer_doi'] = True if 'include_td_html' not in options: options['include_td_html'] = False - + if options['include_processed_data']: stage_list.append('processed') - std_out(f'Options {options}') + logger.info(f'Options {options}') # Fill template if 'upload_type' in descriptor[key].keys(): upload_type = descriptor[key]['upload_type'] - else: - std_out(f'Upload type not set for key {key}. Skipping', 'ERROR') + else: + logger.error(f'Upload type not set for key {key}. Skipping') continue metadata = fill_template(descriptor[key], key, upload_type = upload_type) - + # Get submission ID if not dry_run: submission_id = get_submission_id(metadata, base_url) else: submission_id = 0 if submission_id is not None: - + # Dataset upload if upload_type == 'dataset': # Get the tests to upload tests = descriptor[key]['tests'] - + # Get url where to post the files url = f"{base_url}/api/deposit/depositions/{submission_id}/files" - test_logs = get_tests_log() + test_logs = get_tests_log() for test_name in tests: - + # Get test path - std_out(f'Uploading data from test {test_name}') - + logger.info(f'Uploading data from test {test_name}') + test_path = test_logs[test_name]['path'] # Upload the test descriptor (yaml (and html) format) td_upload = ['yaml'] - with open (join(test_path, 'test_description.yaml'), 'r') as td: + with open (join(test_path, 'test_description.yaml'), 'r') as td: yaml_td = yaml.load(td, Loader = yaml.SafeLoader) - + if options['include_td_html']: html_td = td_to_html(yaml_td, test_path) if html_td: td_upload.append('html') @@ -157,114 +157,114 @@ def upload_file(url, upload_metadata, files): for td_format in td_upload: upload_metadata = {'name': f'test_description_{test_name}.{td_format}'} - + files = {'file': open(join(test_path, f'test_description.{td_format}'), 'rb')} file_size = getsize(join(test_path, f'test_description.{td_format}'))/(1024*1024.0*1024) - - if file_size > 50: std_out(f'File size for {test_name} is over 50Gb ({file_size})', 'WARNING') - + + if file_size > 50: logger.warning(f'File size for {test_name} is over 50Gb ({file_size})') + if not dry_run: status_code = upload_file(url, upload_metadata, files) else: status_code = 200 - - if status_code > 210: - std_out ("Error happened during file upload, status code: " + str(status_code), 'ERROR') + + if status_code > 210: + logger.error ("Error happened during file upload, status code: " + str(status_code)) return else: - std_out(f"{upload_metadata['name']} submitted with submission ID = \ - {submission_id} (DOI: 10.5281/zenodo.{submission_id})" ,"SUCCESS") - + logger.info(f"{upload_metadata['name']} submitted with submission ID = \ 
+ {submission_id} (DOI: 10.5281/zenodo.{submission_id})") + # Load the api devices to have them up to date in the cache - if any(yaml_td['devices'][device]['source'] == 'api' for device in yaml_td['devices'].keys()): + if any(yaml_td['devices'][device]['source'] == 'api' for device in yaml_td['devices'].keys()): test = Test(test_name) test.load(options = {'store_cached_api': True}) - + for device in yaml_td['devices'].keys(): - - std_out(f'Uploading data from device {device}') - + + logger.info(f'Uploading data from device {device}') + # Upload basic and processed data for file_stage in stage_list: - + file_path = '' - + try: # Find device files - if file_stage == 'processed': + if file_stage == 'processed': file_name = f'{device}.csv' file_path = join(test_path, 'processed', file_name) upload_metadata = {'name': f'{device}_PROCESSED.csv'} - + elif file_stage == 'base': if 'csv' in yaml_td['devices'][device]['source']: file_name = yaml_td['devices'][device]['processed_data_file'] file_path = join(test_path, file_name) - + elif yaml_td['devices'][device]['source'] == 'api': file_name = f'{device}.csv' file_path = join(test_path, 'cached', file_name) - + upload_metadata = {'name': file_name} if file_path != '': files = {'file': open(file_path, 'rb')} file_size = getsize(file_path)/(1024*1024.0*1024) - - if file_size > 50: std_out(f'File size for {file_name} over 50Gb ({file_size})', 'WARNING') - + + if file_size > 50: logger.warning(f'File size for {file_name} over 50Gb ({file_size})') + if not dry_run: status_code = upload_file(url, upload_metadata, files) else: status_code = 200 - - if status_code > 210: - std_out (f"Error happened during file upload, status code: {status_code}. Skipping", 'ERROR') + + if status_code > 210: + logger.error (f"Error happened during file upload, status code: {status_code}. Skipping") continue - std_out(f"{upload_metadata['name']} submitted with submission ID =\ - {submission_id} (DOI: 10.5281/zenodo.{submission_id})" ,"SUCCESS") + logger.info(f"{upload_metadata['name']} submitted with submission ID =\ + {submission_id} (DOI: 10.5281/zenodo.{submission_id})") except: - if not exists(file_path): std_out(f'File {file_name} does not exist (type = {file_stage}). Skipping', 'ERROR') + if not exists(file_path): logger.error(f'File {file_name} does not exist (type = {file_stage}). Skipping') # print_exc() pass - + # Check if we have a report in the keys if 'report' in descriptor[key].keys(): for file_name in descriptor[key]['report']: file_path = join(config.paths['uploads'], file_name) - + if options['include_footer_doi'] and file_name.endswith('.pdf'): output_file_path = file_path[:file_path.index('.pdf')] + '_doi.pdf' include_footer(file_path, output_file_path, link = f'https://doi.org/10.5281/zenodo.{submission_id}') file_path = output_file_path - + upload_metadata = {'name': file_name} files = {'file': open(file_path, 'rb')} file_size = getsize(file_path)/(1024*1024.0*1024) - - if file_size > 50: std_out(f'File size for {file_name} is over 50Gb({file_size})', 'WARNING') - + + if file_size > 50: logger.warning(f'File size for {file_name} is over 50Gb({file_size})') + if not dry_run: status_code = upload_file(url, upload_metadata, files) else: status_code = 200 - if status_code > 210: - std_out (f"Error happened during file upload, status code: {status_code}. Skipping", 'ERROR') + if status_code > 210: + logger.error (f"Error happened during file upload, status code: {status_code}. 
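For reference, a minimal standalone sketch of the deposit-and-attach flow that get_submission_id() and upload_file() implement above, against the Zenodo sandbox API (the token, base URL choice and file name below are placeholders, not values from this patch):

    from os import environ
    from requests import post

    base_url = 'https://sandbox.zenodo.org'
    params = {'access_token': environ['ZENODO_TOKEN']}

    # Create an empty deposition; the response carries the submission id
    response = post(f'{base_url}/api/deposit/depositions', params=params, json={})
    submission_id = response.json()['id']

    # Attach one file to the deposition, mirroring upload_file()
    with open('test_description.yaml', 'rb') as fh:
        response = post(f'{base_url}/api/deposit/depositions/{submission_id}/files',
                        params=params,
                        data={'name': 'test_description.yaml'},
                        files={'file': fh})
    print(response.status_code)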
Skipping") continue - std_out(f"{upload_metadata['name']} submitted with submission ID = \ - {submission_id} (DOI: 10.5281/zenodo.{submission_id})" ,"SUCCESS") + logger.info(f"{upload_metadata['name']} submitted with submission ID = \ + {submission_id} (DOI: 10.5281/zenodo.{submission_id})") if upload_type == 'publication': - std_out('Not implemented') + logger.info('Not implemented') return False - - std_out(f'Submission completed - (DOI: 10.5281/zenodo.{submission_id})', 'SUCCESS') - std_out(f'------------------------------------------------------------') + + logger.info(f'Submission completed - (DOI: 10.5281/zenodo.{submission_id})') + logger.info(f'------------------------------------------------------------') else: - std_out(f'Submission ID error', 'ERROR') + logger.error(f'Submission ID error') continue return True From 521a42a53153b291e8634d76e34e427d5b9e527f Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:58:19 +0200 Subject: [PATCH 21/72] Rename test/tools folder --- scdata/test/tools/__init__.py | 3 + scdata/test/{utils => tools}/combine.py | 28 ++--- scdata/test/tools/history.py | 32 ++++++ scdata/test/{utils => tools}/prepare.py | 4 +- scdata/test/utils/__init__.py | 4 - scdata/test/utils/checks.py | 134 ------------------------ scdata/test/utils/dispersion.py | 112 -------------------- 7 files changed, 51 insertions(+), 266 deletions(-) create mode 100644 scdata/test/tools/__init__.py rename scdata/test/{utils => tools}/combine.py (70%) create mode 100644 scdata/test/tools/history.py rename scdata/test/{utils => tools}/prepare.py (94%) delete mode 100644 scdata/test/utils/__init__.py delete mode 100644 scdata/test/utils/checks.py delete mode 100644 scdata/test/utils/dispersion.py diff --git a/scdata/test/tools/__init__.py b/scdata/test/tools/__init__.py new file mode 100644 index 00000000..7d3f7ba4 --- /dev/null +++ b/scdata/test/tools/__init__.py @@ -0,0 +1,3 @@ +from .combine import combine +from .prepare import prepare, normalise_vbls +from .history import history diff --git a/scdata/test/utils/combine.py b/scdata/test/tools/combine.py similarity index 70% rename from scdata/test/utils/combine.py rename to scdata/test/tools/combine.py index 3a609263..481b4db2 100644 --- a/scdata/test/utils/combine.py +++ b/scdata/test/tools/combine.py @@ -1,10 +1,10 @@ from pandas import DataFrame -from scdata.utils import std_out +from scdata.utils import logger from scdata.device import Device def combine(self, devices = None, readings = None): """ - Combines devices from a test into a new dataframe, following the + Combines devices from a test into a new dataframe, following the naming as follows: DEVICE-NAME_READING-NAME Parameters ---------- @@ -17,44 +17,44 @@ def combine(self, devices = None, readings = None): Returns ------- Dataframe if successful or False otherwise - """ + """ dfc = DataFrame() if devices is None: dl = list(self.devices.keys()) - else: + else: # Only pick the ones that are actually present dl = list(set(devices).intersection(list(self.devices.keys()))) if len(dl) != len(devices): - std_out('Requested devices are not all present in devices', 'WARNING') - std_out(f'Discarding {set(devices).difference(list(self.devices.keys()))}') + logger.warning('Requested devices are not all present in devices') + logger.info(f'Discarding {set(devices).difference(list(self.devices.keys()))}') for device in dl: new_names = list() if readings is None: rl = list(self.devices[device].readings.columns) - else: + else: # Only pick the ones that are actually present rl 
= list(set(readings).intersection(list(self.devices[device].readings.columns))) if any([reading not in rl for reading in readings]): - std_out(f'Requested readings are not all present in readings for device {device}', 'WARNING') - std_out(f'Discarding {list(set(readings).difference(list(self.devices[device].readings.columns)))}', 'WARNING') - + logger.warning(f'Requested readings are not all present in readings for device {device}') + logger.warning(f'Discarding {list(set(readings).difference(list(self.devices[device].readings.columns)))}') + rename = dict() for reading in rl: rename[reading] = reading + '_' + self.devices[device].id - + df = self.devices[device].readings[rl].copy() df.rename(columns = rename, inplace = True) dfc = dfc.combine_first(df) - + if dfc.empty: - std_out('Error ocurred while combining data. Review data', 'ERROR') + logger.error('Error occurred while combining data. Review data') return False else: - std_out('Data combined successfully', 'SUCCESS') + logger.info('Data combined successfully') return dfc \ No newline at end of file diff --git a/scdata/test/tools/history.py b/scdata/test/tools/history.py new file mode 100644 index 00000000..0e5a6b05 --- /dev/null +++ b/scdata/test/tools/history.py @@ -0,0 +1,32 @@ +from os import walk +from os.path import join +import yaml +from scdata._config import config + +def history(deep_description = False): + ''' + Gets the tests in the given dir, looking for test_description.yaml + ''' + + # Get available tests in the data folder structure + tests = dict() + + for root, dirs, files in walk(config.paths['processed']): + for file in files: + # TODO Fix + if file.endswith(".yaml") or file.endswith(".json"): + test_name = root.split('/')[-1] + if test_name.startswith('.'): continue + + tests[test_name] = dict() + tests[test_name]['path'] = root + + if deep_description == True: + filePath = join(root, file) + with open(filePath, 'r') as stream: + yamlFile = yaml.load(stream, Loader = yaml.FullLoader) + for key in yamlFile.keys(): + if key == 'devices': continue + tests[test_name][key] = yamlFile[key] + + return tests \ No newline at end of file diff --git a/scdata/test/utils/prepare.py b/scdata/test/tools/prepare.py similarity index 94% rename from scdata/test/utils/prepare.py rename to scdata/test/tools/prepare.py index fc1666f8..7b0282e1 100644 --- a/scdata/test/utils/prepare.py +++ b/scdata/test/tools/prepare.py @@ -1,4 +1,4 @@ -from scdata.utils import std_out, dict_fmerge, clean +from scdata.utils import logger, dict_fmerge, clean from scdata._config import config from numpy import array from pandas import DataFrame @@ -40,7 +40,7 @@ def prepare(self, measurand, inputs, options = dict()): common_channels = inputs[list(inputs.keys())[0]] for input_device in inputs.keys(): common_channels = list(set(common_channels).intersection(set(inputs[input_device]))) - std_out(f'Performing avg in common columns {common_channels}') + logger.info(f'Performing avg in common columns {common_channels}') for channel in common_channels: columns_list = [channel + '_' + device for device in list(inputs.keys())] df[channel + '_AVG'] = df[columns_list].mean(axis = 1) diff --git a/scdata/test/utils/__init__.py b/scdata/test/utils/__init__.py deleted file mode 100644 index af6cc738..00000000 --- a/scdata/test/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .combine import combine -from .prepare import prepare, normalise_vbls -from .dispersion import dispersion_analysis, dispersion_summary -from .checks import get_common_channels, gaps_check diff 
--git a/scdata/test/utils/checks.py b/scdata/test/utils/checks.py deleted file mode 100644 index 92b09f32..00000000 --- a/scdata/test/utils/checks.py +++ /dev/null @@ -1,134 +0,0 @@ -from scdata.utils import std_out -import matplotlib.pyplot as plt -import missingno as msno -from pandas import to_datetime, DataFrame -from scdata.test.plot.plot_tools import prepare_data -from scdata._config import config -from scdata.utils.dictmerge import dict_fmerge - -def gaps_check(self, devices = None, channels = None, groupby = 'channel', **kwargs): - if config.framework == 'jupyterlab': plt.ioff(); - plt.clf(); - - if 'formatting' not in kwargs: - std_out('Using default formatting') - formatting = config._missingno_def_fmt - else: - formatting = dict_fmerge(config._missingno_def_fmt, kwargs['formatting']) - - # Get list of devices - if devices is None: - _devices = list(self.devices.keys()) - else: - _devices = devices - - if channels is None: - std_out('Need some channels to check gaps for', 'ERROR') - return - - if groupby == 'device': - - for device in _devices: - if device not in self.devices: - std_out('Device not found in test', 'WARNING') - continue - msno.matrix(self.devices[device].readings) - - elif groupby == 'channel': - - for channel in channels: - traces = {"1": {"devices": _devices, "channel": channel, "subplot": 1}} - options = config._plot_def_opt - df, _ = prepare_data(self, traces, options) - fig, ax = plt.subplots(1, len(_devices), figsize=(formatting['width'], formatting['height'])) - for device in _devices: - if device not in self.devices: - std_out('Device not found in test', 'WARNING') - continue - msno.matrix(df = DataFrame(df[f'{channel}_{device}']), sparkline=False, ax = ax[_devices.index(device)], fontsize=formatting['fontsize']) - ax[_devices.index(device)].set_yticks([i for i in range(len(df))], [i for i in df.index.values]) - plt.show() - -def get_common_channels(self, devices = None, ignore_missing_channels = False, pop_zero_readings_devices = False, detailed = False, verbose = True): - ''' - Convenience method get the common channels of the devices in the test - Params: - devices: list - None - List of devices to get common channels from. Passing None means 'all' - ignore_missing_channels: bool - False - In case there is a device with lower amount of channels, ignore the missing channels and keep going - pop_zero_readings_devices: bool - False - Remove devices from test that have no readings - verbose: bool - True - Print extra info - Returns: - List containing the common channels to all devices - ''' - - # Get list of devices - if devices is None: - list_devices = list(self.devices.keys()) - return_all = True - else: - list_devices = devices - return_all = False - - # Init list of common channels. 
Get just the first one - list_channels = self.devices[list_devices[0]].readings.columns - - # Extract list of common channels - len_channels = len(list_channels) - show_warning = False - channels_devices = {} - - for device in list_devices: - - if ignore_missing_channels: - # We don't reduce the list in case the new list is smaller - list_channels = list(set(list_channels) | set(self.devices[device].readings.columns)) - else: - # We reduce it - list_channels = list(set(list_channels) & set(self.devices[device].readings.columns)) - - channels_devices[device] = {len(self.devices[device].readings.columns)} - std_out ('Device {}'.format(device), force = verbose) - std_out (f'Min reading at {self.devices[device].readings.index[0]}', force = verbose) - std_out (f'Max reading at {self.devices[device].readings.index[-1]}', force = verbose) - std_out (f'Number of dataframe points {len(self.devices[device].readings.index)}', force = verbose) - if detailed: - for column in list_channels: - std_out ('\tColumn {}'.format(column), force = verbose) - nas = self.devices[device].readings[column].isna() - std_out ('\tNumber of nas {}'.format(nas.sum()), force = verbose) - - ## Eliminate devices with no points - if (len(self.devices[device].readings.index) == 0): - std_out (f'Device {device} for insufficient data points', 'WARNING') - if pop_zero_readings_devices: self.devices.pop(device) - # Check the number of channels - elif len_channels != len(self.devices[device].readings.columns): - std_out(f"Device {device} has {len(self.devices[device].readings.columns)}. Current common channel length is {len_channels}", 'WARNING') - len_channels = len(list_channels) - show_warning = True - if ignore_missing_channels: std_out ("Ignoring missing channels", 'WARNING') - std_out ('---------', force = verbose) - - - if return_all: - - self.common_channels = list_channels - - std_out(f'Final list of channels:\n {self.common_channels}') - if show_warning: - std_out (f'Some devices show less amount of sensors', 'WARNING') - print (channels_devices) - - return self.common_channels - - else: - - return list_channels diff --git a/scdata/test/utils/dispersion.py b/scdata/test/utils/dispersion.py deleted file mode 100644 index 56c723a0..00000000 --- a/scdata/test/utils/dispersion.py +++ /dev/null @@ -1,112 +0,0 @@ -from scdata.utils import std_out, localise_date -from pandas import DataFrame -from scdata._config import config - -def dispersion_analysis(self, devices = None, min_date = None, max_date = None, timezone = 'Europe/Madrid', smooth_window = 5): - ''' - Creates channels on a new dataframe for each device/channel combination, and makes the average/std of each - in a point-by-point fashion - - Parameters: - ----------- - devices: list - Default: None - If list of devices is None, then it will use all devices in self.devices - min_date: String - Default: None - Minimum date from which to perform the analysis - - max_date: String - Default: None - Maximum date from which to perform the analysis - - timezone: String - Default: None - Sensors for timezone - - smooth_window: int - Default: 5 - If not None, performs smoothing of the channels with rolling average. 
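The smoothing step that this removed helper applied per channel can be reproduced in a few lines of pandas (a sketch; the channel name, index and window below are illustrative, not taken from the codebase):

    from numpy import random
    from pandas import DataFrame, date_range

    df = DataFrame({'NOISY_CH': random.rand(60)},
                   index=date_range('2023-01-01', periods=60, freq='1Min'))

    smooth_window = 5
    # Backfill gaps first, then take a rolling average and keep positive values,
    # as dispersion_analysis did for each channel-device combination
    channel_new = df['NOISY_CH'].bfill().rolling(window=smooth_window).mean()
    smoothed = channel_new[channel_new > 0]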
- - Returns: - --------- - - ''' - dispersion_df = DataFrame() - - # Get common channels for this group - if devices is not None: - common_ch = self.get_common_channels(devices = devices) - _devices = devices - else: - common_ch = self.get_common_channels() - _devices = self.devices - - # Localise dates - min_date = localise_date(min_date, timezone) - max_date = localise_date(max_date, timezone) - - # Calculate the dispersion for the sensors present in the dataset - warning = False - - for channel in common_ch: - columns = list() - - if channel in config._dispersion['ignore_channels']: continue - - for device in _devices: - if channel in self.devices[device].readings.columns and len(self.devices[device].readings.loc[:,channel]) >0: - # Important to resample and bfill for unmatching measures - if smooth_window is not None: - # channel_new = self.devices[device].readings[channel].resample('1Min').bfill().rolling(window=smooth_window).mean() - channel_new = self.devices[device].readings[channel].bfill().rolling(window=smooth_window).mean() - dispersion_df[channel + '-' + device] = channel_new[channel_new > 0] - else: - dispersion_df[channel + '-' + device] = self.devices[device].readings[channel].resample('1Min').bfill() - - columns.append(channel + '-' + device) - else: - std_out(f'Device {device} does not contain {channel}

', 'WARNING') - warning = True - - dispersion_df.index = localise_date(dispersion_df.index, timezone) - - # Trim dataset to min and max dates (normally these tests are carried out with _minutes_ of differences) - if min_date is not None: dispersion_df = dispersion_df[dispersion_df.index > min_date] - if max_date is not None: dispersion_df = dispersion_df[dispersion_df.index < max_date] - - # Calculate Metrics - dispersion_df[channel + '_AVG'] = dispersion_df.loc[:,columns].mean(skipna=True, axis = 1) - dispersion_df[channel + '_STD'] = dispersion_df.loc[:,columns].std(skipna=True, axis = 1) - - if not warning: - std_out(f'All devices have the provided channels list recorded') - else: - std_out(f'Missing channels, review data', 'WARNING') - - if devices is None: - self.dispersion_df = dispersion_df - return self.dispersion_summary - - group_dispersion_summary = dict() - - for channel in common_ch: - if channel in config._dispersion['ignore_channels']: continue - # Calculate - group_dispersion_summary[channel] = dispersion_df[channel + '_STD'].mean() - - return group_dispersion_summary - -@property -def dispersion_summary(self): - self._dispersion_summary = dict() - - if self.dispersion_df is None: - std_out('Perform dispersion analysis first!', 'ERROR') - return None - for channel in self.common_channels: - if channel in config._dispersion['ignore_channels']: continue - # Calculate - self._dispersion_summary[channel] = self.dispersion_df[channel + '_STD'].mean() - - return self._dispersion_summary From 3894985af18f70b20057b84e9cb6271c4b817417 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:58:43 +0200 Subject: [PATCH 22/72] Move device into single file --- scdata/device/__init__.py | 765 +------------------------------------- scdata/device/device.py | 684 ++++++++++++++++++++++++++++++++++ 2 files changed, 686 insertions(+), 763 deletions(-) create mode 100644 scdata/device/device.py diff --git a/scdata/device/__init__.py b/scdata/device/__init__.py index ba588b07..21ef9149 100644 --- a/scdata/device/__init__.py +++ b/scdata/device/__init__.py @@ -1,763 +1,2 @@ -''' Main implementation of class Device ''' - -from scdata.utils import std_out, localise_date, dict_fmerge, get_units_convf -from scdata.io import read_csv_file, export_csv_file -from scdata.utils import LazyCallable, url_checker, get_json_from_url -from scdata._config import config -from scdata.device.process import * -from scdata.io.device_api import * - -from os.path import join, basename -from urllib.parse import urlparse -from pandas import DataFrame, to_timedelta -from numpy import nan -from collections.abc import Iterable -from importlib import import_module - -from timezonefinder import TimezoneFinder -tf = TimezoneFinder() - -class Device(object): - ''' Main implementation of the device class ''' - - def __init__(self, blueprint = None, source=dict(), params=dict()): - - ''' - Creates an instance of device. Devices are objects that contain sensors data, metrics - (calculations based on sensors data), and metadata such as units, dates, frequency and source - - Parameters: - ----------- - blueprint: String - Default: 'sck_21' - Defines the type of device. For instance: sck_21, sck_20, csic_station, muv_station - parrot_soil, sc_20_station, sc_21_station... A list of all the blueprints is found in - config.blueprints_urls and accessible via the scdata.utils.load_blueprints(urls) function. - The blueprint can also be defined from the postprocessing info in SCAPI. 
- The manual parameter passed here overrides that of the API. - - source: dict() - Default: empty dict - A dictionary containing a description of how to obtain the data from the device itself. - - params: dict() - Default: empty dict - A dictionary containing information about the device itself. Depending on the blueprint, this params needs to have different data. If not all the data is present, the corresponding blueprint's default will be used - - Examples: - ---------- - Device('sck_21', handler = {'source': 'api', 'id': '1919'}, params = {}) - device with sck_21 blueprint with 1919 ID - - Returns - ---------- - Device object - ''' - - # Set handler - self.source = source - self.__set_handler__() - - # Set custom params - self.params = params - self.__set_blueprint_attrs__(config.blueprints['base']) - self.__set_params_attrs__(params) - - # Start out handler object now - if self.hclass is not None: - self.handler = self.hclass(self.id) - - # Set blueprint - if blueprint is not None: - self.blueprint = blueprint - if self.blueprint not in config.blueprints: - raise ValueError(f'Specified blueprint {self.blueprint} is not in available blueprints') - self.__set_blueprint_attrs__(config.blueprints[self.blueprint]) - else: - if url_checker(self.handler.blueprint_url): - std_out(f'Loading postprocessing blueprint from:\n{self.handler.blueprint_url}') - self.blueprint = basename(urlparse(self.handler.blueprint_url).path).split('.')[0] - else: - raise ValueError(f'Specified blueprint url {self.handler.blueprint_url} is not valid') - self.__set_blueprint_attrs__(self.handler.blueprint) - - # TODO Remove - # self.__fill_handler_metrics__() - - # Init the rest of the stuff - self.data = DataFrame() - self.loaded = False - self.processed = False - self.postprocessing_updated = False - std_out(f'Device {self.id} initialised', 'SUCCESS') - - def __set_handler__(self): - # Add handlers here - if self.source['type'] == 'api': - if 'module' in self.source: - module = self.source['module'] - else: - module = 'scdata.io.device_api' - try: - hmod = import_module(self.source['module']) - except ModuleNotFoundError: - std_out(f"Module not found: {self.source['module']}") - raise ModuleNotFoundError(f'Specified module not found') - else: - self.hclass = getattr(hmod, self.source['handler']) - # Create object - std_out(f'Setting handler as {self.hclass}') - elif self.source['type'] == 'csv': - # TODO Add handler here - std_out ('No handler for CSV yet', 'ERROR') - self.hclass = None - elif self.source['type'] == 'kafka': - # TODO Add handler here - std_out ('No handler for kafka yet', 'ERROR') - raise NotImplementedError - - def __set_blueprint_attrs__(self, blueprintd): - - # Set attributes - for bpitem in blueprintd: - if bpitem not in vars(self): - self.__setattr__(bpitem, blueprintd[bpitem]) - elif self.__getattribute__(bpitem) is None: - self.__setattr__(bpitem, blueprintd[bpitem]) - - def __set_params_attrs__(self, params): - - # Params attributes - for param in params.keys(): - if param not in vars(self): - std_out (f'Ignoring {param} from input') - continue - if type(self.__getattribute__(param)) == dict: - self.__setattr__(param, dict_fmerge(self.__getattribute__(param), params[param])) - else: - self.__setattr__(param, params[param]) - - # TODO - def validate(self): - return True - - def merge_sensor_metrics(self, ignore_empty = True): - std_out('Merging sensor and metrics channels') - all_channels = dict_fmerge(self.sensors, self.metrics) - - if ignore_empty: - to_ignore = [] - for channel in 
all_channels.keys(): - if channel not in self.data: to_ignore.append(channel) - elif self.data[channel].dropna().empty: - std_out (f'{channel} is empty') - to_ignore.append(channel) - - [all_channels.pop(x) for x in to_ignore] - - return all_channels - - def add_metric(self, metric = dict()): - ''' - Add a metric to the device to be processed by a callable function - Parameters - ---------- - metric: dict - Empty dict - Description of the metric to be added. It only adds it to - Device.metrics, but does not calculate anything yet. The metric dict needs - to follow the format: - metric = { - 'metric_name': {'process': - 'args': - 'kwargs': <**kwargs for @function_name> - 'from_list': - } - } - The 'from_list' parameter is optional, and onle needed if the process is not - already available in scdata.device.process. - - For a list of available processes call help(scdata.device.process) - - Example: - -------- - metric = {'NO2_CLEAN': {'process': 'clean_ts', - 'kwargs': {'name': pollutant, - 'limits': [0, 350], - 'window_size': 5} - }} - Returns - ---------- - True if added metric - ''' - - if 'metrics' not in vars(self): - std_out(f'Device {self.id} has no metrics yet. Adding') - self.metrics = dict() - - try: - metricn = next(iter(metric.keys())) - self.metrics[metricn] = metric[metricn] - # TODO Except what? - except: - print_exc() - return False - - std_out(f'Metric {metric} added to metrics', 'SUCCESS') - return True - - def del_metric(self, metricn = ''): - if 'metrics' not in vars(self): return - if metricn in self.metrics: self.metrics.pop(metricn, None) - if metricn in self.data.columns: self.data.__delitem__(metricn) - - if metricn not in self.data and metricn not in self.metrics: - std_out(f'Metric {metricn} removed from metrics', 'SUCCESS') - return True - return False - - async def load(self, convert_units = True, only_unprocessed = False, max_amount = None, follow_defaults = False): - ''' - Loads the device with some options - - Parameters: - ----------- - options: dict() - Default: None - options['min_date'] = date to load data from - Default to device min_date (from blueprint or test) - options['max_date'] = date to load data to - Default to device max_date (from blueprint or test) - options['clean_na'] = clean na (drop_na, fill_na or None) - Default to device clean_na (from blueprint or test) - options['frequency'] = frequency to load data at in pandas format - Default to device frequency (from blueprint or test) or '1Min' - path: String - Default: '' - Path were the csv file is, if any. 
Normally not needed to be provided, only for internal usage - convert_units: bool - Default: True - Convert units for channels based on config._channel_lut - only_unprocessed: bool - Default: False - Loads only unprocessed data - max_amount: int - Default: None - Trim dataframe to this amount for processing and forwarding purposes - follow_defaults: bool - Default: False - Use defaults from config._csv_defaults for loading - Returns - ---------- - True if loaded correctly - ''' - - # # Add overrides if we have them, otherwise set device defaults - # self.__check_overrides__(options) - # std_out(f'Using options for device: {options}') - - if self.source['type'] == 'csv': - # TODO Review if this is necessary - if follow_defaults: - index_name = config._csv_defaults['index_name'] - sep = config._csv_defaults['sep'] - skiprows = config._csv_defaults['skiprows'] - else: - index_name = self.source['index_name'] - sep = self.source['sep'] - skiprows = self.source['skiprows'] - - # TODO Change this for a csv handler - # here we don't use tzaware because we only load preprocessed data - try: - csv_data = read_csv_file( - file_path = join(path, self.processed_data_file), - timezone = self.timezone, - frequency = self.frequency, - clean_na = self.clean_na, - resample = self.resample, - index_name = index_name, - sep = sep, - skiprows = skiprows) - except FileNotFoundError: - std_out(f'File not found for device {self.id} in {path}', 'ERROR') - else: - if csv_data is not None: - self.data = self.data.combine_first(csv_data) - self.__convert_names__() - self.loaded = self.__load_wrapup__(max_amount, convert_units) - - elif self.source['type'] == 'api': - - if self.handler.method == 'async': - await self.handler.get_data( - min_date = self.min_date, - max_date = self.max_date, - freq = self.frequency, - clean_na = self.clean_na, - resample = self.resample, - only_unprocessed = only_unprocessed) - else: - self.handler.get_data( - min_date = self.min_date, - max_date = self.max_date, - freq = self.frequency, - clean_na = self.clean_na, - resample = self.resample, - only_unprocessed = only_unprocessed) - # In principle this makes both dataframes as they are unmutable - self.data = self.handler.data - - # Wrap it all up - self.loaded = self.__load_wrapup__(max_amount, convert_units) - - elif self.source['type'] == 'kafka': - std_out('Not yet', 'ERROR') - raise NotImplementedError - - self.processed = False - return self.loaded - - def __load_wrapup__(self, max_amount, convert_units): - if self.data is not None: - self.__check_sensors__() - if not self.data.empty: - if max_amount is not None: - # TODO Dirty workaround - std_out(f'Trimming dataframe to {max_amount} rows') - self.data=self.data.dropna(axis = 0, how='all').head(max_amount) - # Convert units - if convert_units: - self.__convert_units__() - self.postprocessing_updated = False - return True - else: - std_out('Empty dataframe in data', 'WARNING') - return False - else: - return False - - # TODO remove - def __fill_handler_metrics__(self): - std_out('Checking if metrics need to be added based on hardware info') - - if self.handler.hardware_postprocessing is None: - std_out(f'No hardware url in device {self.id}, ignoring') - return None - - for metric in self.handler.metrics: - metricn = next(iter(metric)) - if metricn not in self.metrics: - std_out(f'Metric {metricn} from handler not in blueprint, ignoring.', 'WARNING') - continue - self.metrics[metricn]['kwargs'] = metric[metricn]['kwargs'] - - def __check_sensors__(self): - - extra_sensors = list() 
- # Check sensors from the list that are not in self.data.columns - for sensor in self.sensors: - if sensor not in self.data.columns: - std_out(f'{sensor} not in data columns', 'INFO') - extra_sensors.append(sensor) - - extra_columns = list() - # Check columns from the data that are not in self.sensors - for column in self.data.columns: - if column not in self.sensors: - extra_columns.append(column) - std_out(f'Data contains extra columns: {extra_columns}', 'INFO') - - if config.data['strict_load']: - std_out(f"config.data['strict_load'] is enabled. Removing extra columns") - if extra_sensors != []: - std_out(f'Removing sensors from device.sensors: {extra_sensors}', 'WARNING') - for sensor_to_remove in extra_sensors: - self.sensors.pop(sensor_to_remove, None) - if extra_columns != []: - self.data.drop(extra_columns, axis=1, inplace=True) - else: - std_out(f"config.data['strict_load'] is disabled. Ignoring extra columns") - - std_out(f'Device sensors after checks: {list(self.sensors.keys())}') - - def __convert_names__(self): - rename = dict() - for sensor in self.sensors: - if 'id' in self.sensors[sensor]: - if self.sensors[sensor]['id'] in self.data.columns: - rename[self.sensors[sensor]['id']] = sensor - else: - std_out(f'No id in {self.sensors[sensor]}', 'WARNING') - self.data.rename(columns=rename, inplace=True) - - def __convert_units__(self): - ''' - Convert the units based on the UNIT_LUT and blueprint - NB: what is read/written from/to the cache is not converted. - The files are with original units, and then converted in the device only - for the data but never chached like so. - ''' - std_out('Checking if units need to be converted') - for sensor in self.sensors: - factor = get_units_convf(sensor, from_units = self.sensors[sensor]['units']) - - if factor != 1: - self.data.rename(columns={sensor: sensor + '_in_' + self.sensors[sensor]['units']}, inplace=True) - self.data.loc[:, sensor] = self.data.loc[:, sensor + '_in_' + self.sensors[sensor]['units']]*factor - std_out('Units check done', 'SUCCESS') - - def process(self, only_new = False, lmetrics = None): - ''' - Processes devices metrics, either added by the blueprint definition - or the addition using Device.add_metric(). See help(Device.add_metric) for - more information about the definition of the metrics to be added. - - Parameters - ---------- - only_new: boolean - False - To process or not the existing channels in the Device.data that are - defined in Device.metrics - lmetrics: list - None - List of metrics to process. If none, processes all - Returns - ---------- - boolean - True if processed ok, False otherwise - ''' - - process_ok = True - self.postprocessing_updated = False - - if 'metrics' not in vars(self): - std_out(f'Device {self.id} has nothing to process. Skipping', 'WARNING') - return process_ok - - std_out('---------------------------') - std_out(f'Processing device {self.id}') - - if lmetrics is None: metrics = self.metrics - else: metrics = dict([(key, self.metrics[key]) for key in lmetrics]) - - for metric in metrics: - std_out(f'---') - std_out(f'Processing {metric}') - - if only_new and metric in self.data: - std_out(f'Skipping. 
Already in device') - continue - - # Check if the metric contains a custom from_list - if 'from_list' in metrics[metric]: - lazy_name = metrics[metric]['from_list'] - else: - lazy_name = f"scdata.device.process.{metrics[metric]['process']}" - - try: - funct = LazyCallable(lazy_name) - except ModuleNotFoundError: - process_ok &= False - std_out('Problem adding lazy callable to metrics list', 'ERROR') - pass - - args, kwargs = list(), dict() - if 'args' in metrics[metric]: args = metrics[metric]['args'] - if 'kwargs' in metrics[metric]: kwargs = metrics[metric]['kwargs'] - - try: - result = funct(self.data, *args, **kwargs) - except KeyError: - std_out('Cannot process requested function with data provided', 'ERROR') - process_ok = False - pass - else: - if result is not None: - self.data[metric] = result - process_ok &= True - # If the metric is None, might be for many reasons and shouldn't collapse the process_ok - - if process_ok: - std_out(f"Device {self.id} processed", "SUCCESS") - self.processed = process_ok & self.update_postprocessing_date() - - return self.processed - - def update_postprocessing_date(self): - - latest_postprocessing = localise_date(self.data.index[-1]+\ - to_timedelta(self.frequency), 'UTC') - if self.handler.update_latest_postprocessing(latest_postprocessing): - if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing: - self.postprocessing_updated = True - else: - self.postprocessing_updated = False - return self.postprocessing_updated - - # TODO - def checks(self, level): - ''' - Device checks - ''' - # TODO Make checks dependent on each handler - if self.source == 'api': - # TODO normalise the functions accross all handlers - # Check status code from curl - response = self.api_device.checks() - response['status'] = 200 - - return response - - # TODO Remove - def forward(self, chunk_size = 500, dry_run = False, max_retries = 2): - ''' - Forwards data to another api. 
- Parameters - ---------- - chunk_size: int - 500 - Chunk size to be sent to device.post_data_to_device in question - dry_run: boolean - False - Post the payload to the API or just return it - max_retries: int - 2 - Maximum number of retries per chunk - Returns - ---------- - boolean - True if posted ok, False otherwise - ''' - - # Import requested handler - hmod = __import__('scdata.io.device_api', fromlist = ['io.device_api']) - Hclass = getattr(hmod, config.connectors[self.forwarding_request]['handler']) - - # Create new device in target API if it hasn't been created yet - if self.forwarding_params is None: - std_out('Empty forwarding information, attemping creating a new device', 'WARNING') - # We assume the device has never been posted - # Construct new device kwargs dictionary - kwargs = dict() - for item in config.connectors[self.forwarding_request]['kwargs']: - val = config.connectors[self.forwarding_request]['kwargs'][item] - if val == 'options': - kitem = self.options[item] - elif val == 'config': - # Items in config should be underscored - kitem = config.__getattr__(f'_{item}') - elif isinstance(val, Iterable): - if 'same' in val: - if 'as_device' in val: - if item == 'sensors': - kitem = self.merge_sensor_metrics(ignore_empty = True) - elif item == 'description': - kitem = self.blueprint.replace('_', ' ') - elif 'as_api' in val: - if item == 'sensors': - kitem = self.api_device.get_device_sensors() - elif item == 'description': - kitem = self.api_device.get_device_description() - else: - kitem = val - kwargs[item] = kitem - - response = Hclass.new_device(name = config.connectors[self.forwarding_request]['name_prepend']\ - + str(self.id), - location = self.location, - dry_run = dry_run, - **kwargs) - if response: - if 'message' in response: - if response['message'] == 'Created': - if 'sensorid' in response: - self.forwarding_params = response['sensorid'] - self.api_device.postprocessing['forwarding_params'] = self.forwarding_params - std_out(f'New sensor ID in {self.forwarding_request}\ - is {self.forwarding_params}. Updating') - - if self.forwarding_params is not None: - df = self.data.copy() - df = df[df.columns.intersection(list(self.merge_sensor_metrics(ignore_empty=True).keys()))] - df = clean(df, 'drop', how = 'all') - - if df.empty: - std_out('Empty dataframe, ignoring', 'WARNING') - return False - - # Create object - ndev = Hclass(did = self.forwarding_params) - post_ok = ndev.post_data_to_device(df, chunk_size = chunk_size, - dry_run = dry_run, max_retries = 2) - - if post_ok: - # TODO Check if we like this - if self.source == 'api': - self.update_latest_postprocessing() - std_out(f'Posted data for {self.id}', 'SUCCESS') - else: - std_out(f'Error posting data for {self.id}', 'ERROR') - return post_ok - - else: - std_out('Empty forwarding information', 'ERROR') - return False - - def export(self, path, forced_overwrite = False, file_format = 'csv'): - ''' - Exports Device.data to file - Parameters - ---------- - path: String - Path to export file to, does not include filename. - The filename will be the Device.id property - forced_overwrite: boolean - False - Force data export in case of already existing file - file_format: String - 'csv' - File format to export. 
Current supported format CSV - Returns - --------- - True if exported ok, False otherwise - ''' - # Export device - if file_format == 'csv': - return export_csv_file(path, str(self.id), self.data, forced_overwrite = forced_overwrite) - else: - std_out('Not supported format' ,'ERROR') - return False - - # TODO Check - def post_sensors(self, clean_na = 'drop', chunk_size = 500, dry_run = False, max_retries = 2): - ''' - Posts devices sensors. Only available for parent of ScApiDevice - Parameters - ---------- - clean_na: string, optional - 'drop' - 'drop', 'fill' - chunk_size: integer - chunk size to split resulting pandas DataFrame for posting data - dry_run: boolean - False - Post the payload to the API or just return it - max_retries: int - 2 - Maximum number of retries per chunk - Returns - ---------- - boolean - True if posted ok, False otherwise - ''' - - post_ok = True - - rd = dict() - df = self.data.copy().dropna(axis = 0, how='all') - for col in self.data: - if col not in self.sensors: - std_out(f'Column ({col}) not in recognised IDs. Ignoring', 'WARNING') - df.drop(col, axis=1, inplace=True) - continue - rd[col] = self.sensors[col]['id'] - - df.rename(columns=rd, inplace=True) - - if df.empty: - std_out('Empty dataframe, ignoring', 'WARNING') - return False - - std_out(f'Trying to post {list(df.columns)}') - post_ok = self.handler.post_data_to_device(df, clean_na = clean_na, - chunk_size = chunk_size, dry_run = dry_run, max_retries = max_retries) - if post_ok: std_out(f'Posted data for {self.id}', 'SUCCESS') - else: std_out(f'Error posting data for {self.id}', 'ERROR') - - return post_ok - - # TODO Check - def update_postprocessing(self, dry_run = False): - ''' - Posts device postprocessing. Only available for parent of ScApiDevice - Parameters - ---------- - dry_run: boolean - False - Post the payload to the API or just return it - Returns - ---------- - boolean - True if posted ok, False otherwise - ''' - if not self.postprocessing_updated: - std_out(f'Postprocessing is not up to date', 'ERROR') - return False - - post_ok = self.api_device.patch_postprocessing(dry_run=dry_run) - - if post_ok: std_out(f"Postprocessing posted for device {self.id}", "SUCCESS") - return post_ok - - # TODO Check - def post_metrics(self, with_postprocessing = False, chunk_size = 500, dry_run = False, max_retries = 2): - ''' - Posts devices metrics. 
Only available for parent of ScApiDevice - Parameters - ---------- - with_postprocessing: boolean - False - Post the postprocessing_attributes too - chunk_size: integer - chunk size to split resulting pandas DataFrame for posting data - dry_run: boolean - False - Post the payload to the API or just return it - max_retries: int - 2 - Maximum number of retries per chunk - Returns - ---------- - boolean - True if posted ok, False otherwise - ''' - - post_ok = True - if self.sources[self.source]['handler'] != 'ScApiDevice': - std_out('Only supported processing post is to SmartCitizen API', 'ERROR') - return False - - rd = dict() - std_out(f"Posting metrics for device {self.id}") - # Make a copy of df - df = self.data.copy().dropna(axis = 0, how='all') - # Get metrics to post, only the ones that have True in 'post' field and a valid ID - # Replace their name with the ID to post - for metric in self.metrics: - if self.metrics[metric]['post'] == True and metric in self.data.columns: - std_out(f"Adding {metric} for device {self.id} (ID: {self.metrics[metric]['id']})") - rd[metric] = self.metrics[metric]['id'] - - # Keep only metrics in df - df = df[df.columns.intersection(list(rd.keys()))] - df.rename(columns=rd, inplace=True) - # Fill None or other values with actual NaN - df = df.fillna(value=nan) - - # If empty, avoid - if df.empty: - std_out('Empty dataframe, ignoring', 'WARNING') - return False - - std_out(f'Trying to post {list(df.columns)}') - post_ok = self.api_device.post_data_to_device(df, chunk_size = chunk_size, dry_run = dry_run, max_retries = max_retries) - if post_ok: std_out(f'Posted metrics for {self.id}', 'SUCCESS') - else: std_out(f'Error posting metrics for {self.id}', 'ERROR') - - # Post info if requested. It should be updated elsewhere - if with_postprocessing and post_ok: - post_ok &= self.update_postprocessing(dry_run=dry_run) - - if post_ok: std_out(f"Metrics posted for device {self.id}", "SUCCESS") - return post_ok +from .process import * +from .device import Device \ No newline at end of file diff --git a/scdata/device/device.py b/scdata/device/device.py new file mode 100644 index 00000000..8e6ff4cb --- /dev/null +++ b/scdata/device/device.py @@ -0,0 +1,684 @@ +''' Main implementation of class Device ''' + +from scdata.utils import logger, localise_date, \ + dict_fmerge, get_units_convf +from scdata.io import read_csv_file, export_csv_file +from scdata.utils import LazyCallable, url_checker, \ + get_json_from_url, find_by_field +from scdata._config import config +from scdata.io.device_api import * +from scdata.models import Blueprint, Metric, Source, APIParams, FileParams, DeviceOptions, Sensor + +from os.path import join, basename +from urllib.parse import urlparse +from pandas import DataFrame, to_timedelta, Timedelta +from numpy import nan +from collections.abc import Iterable +from importlib import import_module +from pydantic import TypeAdapter, BaseModel, ConfigDict +from pydantic_core import ValidationError +from typing import Optional, List +from json import dumps + +from timezonefinder import TimezoneFinder +tf = TimezoneFinder() + +class Device(BaseModel): + ''' Main implementation of the device class ''' + model_config = ConfigDict(arbitrary_types_allowed = True) + + blueprint: str = None + source: Source = Source() + # Convert this to Options + options: DeviceOptions = DeviceOptions() + params: object = None + metrics: List[Metric] = [] + meta: dict = dict() + hclass: object = None + handler: object = None + data: DataFrame = DataFrame() + loaded: bool = 
False + processed: bool = False + postprocessing_updated: bool = False + + def model_post_init(self, __context) -> None: + + ''' + Creates an instance of device. Devices are objects that contain sensors data, metrics + (calculations based on sensors data), and metadata such as units, dates, frequency and source + + Parameters: + ----------- + blueprint: String + Default: 'sck_21' + Defines the type of device. For instance: sck_21, sck_20, csic_station, muv_station + parrot_soil, sc_20_station, sc_21_station... A list of all the blueprints is found in + config.blueprints_urls and accessible via the scdata.utils.load_blueprints(urls) function. + The blueprint can also be defined from the postprocessing info in SCAPI. + The manual parameter passed here overrides that of the API. + + source: dict() + Default: empty dict + A dictionary containing a description of how to obtain the data from the device itself. + + params: dict() + Default: empty dict + A dictionary containing information about the device itself. Depending on the blueprint, this params needs to have different data. If not all the data is present, the corresponding blueprint's default will be used + + Returns + ---------- + Device object + ''' + + # Set handler + self.__set_handler__() + + # Set blueprint + if self.blueprint is not None: + if self.blueprint not in config.blueprints: + raise ValueError(f'Specified blueprint {self.blueprint} is not in available blueprints') + self.__set_blueprint_attrs__(config.blueprints[self.blueprint]) + else: + if url_checker(self.handler.blueprint_url): + logger.info(f'Loading postprocessing blueprint from:\n{self.handler.blueprint_url}') + self.blueprint = basename(urlparse(self.handler.blueprint_url).path).split('.')[0] + self.__set_blueprint_attrs__(self.handler.properties) + else: + raise ValueError(f'Specified blueprint url {self.handler.blueprint_url} is not valid') + + logger.info(f'Device {self.params.id} initialised') + + def __set_handler__(self): + # Add handlers here + if self.source.type == 'api': + try: + module = self.source.module + except: + # Default to device_api if not specified + module = 'scdata.io.device_api' + logger.warning(f'Module not specified. Defaulting to {module}') + pass + + # Try to find module + try: + hmod = import_module(module) + except ModuleNotFoundError: + logger.error(f"Module not found: {module}") + raise ModuleNotFoundError(f'Specified module not found') + else: + self.hclass = getattr(hmod, self.source.handler) + logger.info(f'Setting handler as {self.hclass}') + + elif self.source.type == 'file': + try: + module = self.source.module + except: + # Default to device_file if not specified + module = 'scdata.io.device_file' + logger.warning(f'Module not specified. 
Defaulting to {module}') + pass + + # Try to find module + try: + hmod = import_module(module) + except ModuleNotFoundError: + raise ModuleNotFoundError(f'Specified module not found {module}') + else: + self.hclass = getattr(hmod, self.source.handler) + logger.info(f'Setting handler as {self.hclass}') + + elif self.source.type == 'stream': + # TODO Add handler here + raise NotImplementedError('No handler for stream yet') + + # TODO - Fix to be able to pass other things that are not IDs + if self.hclass is not None: + self.handler = self.hclass(self.params.id) + else: + raise ValueError("Devices need one handler") + + def __set_blueprint_attrs__(self, blueprint): + # Set attributes + for item in blueprint: + if item not in vars(self): + raise ValueError(f'Invalid blueprint item {item}') + else: + # Small workaround for postponed fields + item_type = self.model_fields[item].annotation + self.__setattr__(item, TypeAdapter(item_type).validate_python(blueprint[item])) + + # Sensors renaming dict + self._sensors = TypeAdapter(List[Sensor]).validate_python(self.handler.sensors) + + self._rename = dict() + for channel in self._sensors: + if channel.id is not None: + _ch = find_by_field(config.names[self.source.handler], channel.id, 'id') + if _ch: + self._rename[channel.name] = _ch.name + else: + logger.warning(f'Channel {channel.name} has no id') + + # Metrics stay the same + for channel in self.metrics: + self._rename[channel.name] = channel.name + + # TODO - Improve? + @property + def valid_for_processing(self): + if self.blueprint is not None and \ + self.handler.hardware_postprocessing is not None and \ + self.handler.postprocessing is not None and \ + self.handler.blueprint_url is not None: + return True + else: + return False + + @property + def id(self): + return self.params.id + + def add_metric(self, metric = dict()): + ''' + Add a metric to the device to be processed by a callable function + Parameters + ---------- + metric: dict + Empty dict + Description of the metric to be added. It only adds it to + Device.metrics, but does not calculate anything yet. The metric dict needs + to follow the format: + metric = { + 'id': Optional[int] = None + (in an API for instance) + 'description': Optional[str] = '' + + 'name': [str] + + 'function': [str] + + 'args': Optional[dict] = None + + 'kwargs': Optional[dict] = None + <**kwargs for @function_name> + 'module': Optional[str] = 'scdata.device.process + ' + } + The 'module' parameter is only needed if the process is not + already available in scdata.device.process. + Example: + -------- + d.add_metric(metric={ + 'name': 'NO2_CLEAN', + 'function': 'clean_ts', + 'description': 'Clean NO2 channel', + 'units': 'ppb', + 'kwargs': { + 'name': 'NO2', + 'limits': [0, 350], + 'window_size': 5} + }) + Returns + ---------- + True if added metric + ''' + + if 'metrics' not in vars(self): + logger.info(f'Device {self.params.id} has no metrics yet. 
Adding') + self.metrics = list() + + _metric = TypeAdapter(Metric).validate_python(metric) + + if self.__check_callable__(_metric.module, _metric.function): + self.metrics.append(_metric) + + logger.info(f"Metric {_metric.name} added to metrics") + self._rename[_metric.name] = _metric.name + return True + + def del_metric(self, metric_name = ''): + if 'metrics' not in vars(self): raise ValueError('Device has no metrics') + m = find_by_field(self.metrics, metric_name, 'name') + if m: + self.metrics.remove(m) + else: + logger.warning(f'Metric {metric_name} not in metrics') + return False + if metric_name in self.data.columns: + self.data.__delitem__(metric_name) + + if metric_name not in self.data and find_by_field(self.metrics, metric_name, 'name') is None: + logger.info(f'Metric {metric_name} removed from metrics') + return True + return False + + async def load(self, cache=None, convert_units=True, convert_names=True, max_amount=None): + ''' + Loads the device with some options + + Parameters: + ----------- + cache: String + Default: None + Path were the cached file is, if any. Normally not needed to be provided, only for internal usage + convert_units: bool + Default: True + Convert units for channels based on config._channel_lut + convert_names: bool + Default: True + Convert names for channels based on ids + max_amount: int + Default: None + Trim dataframe to this amount for processing and forwarding purposes (workaround) + Returns + ---------- + True if loaded correctly + ''' + min_date = self.options.min_date + max_date = self.options.max_date + timezone = self.handler.timezone + frequency = self.options.frequency + clean_na = self.options.clean_na + resample = self.options.resample + cached_data = DataFrame() + + # Only case where cache makes sense + if self.source.type == 'api': + if cache is not None and cache: + if cache.endswith('.csv'): + cached_data = read_csv_file( + path = cache, + timezone = timezone, + frequency = frequency, + clean_na = clean_na, + resample = resample, + index_name = 'TIME') + else: + raise NotImplementedError(f'Cache needs to be a .csv file. 
Got {cache}.') + + if not cached_data.empty: + # Update min_date + min_date=cached_data.index[-1].tz_convert('UTC')+Timedelta(frequency) + + # Not implemented "for now" + elif self.source.type == 'stream': + raise NotImplementedError('Source type stream not implemented yet') + + # The methods below should be implemented from the handler type + if self.handler.method == 'async': + await self.handler.get_data( + min_date = min_date, + max_date = max_date, + frequency = frequency, + clean_na = clean_na, + resample = resample) + else: + self.handler.get_data( + min_date = min_date, + max_date = max_date, + frequency = frequency, + clean_na = clean_na, + resample = resample) + + # In principle this links both dataframes as they are unmutable + self.data = self.handler.data + # Wrap it all up + self.loaded = self.__load_wrapup__(max_amount, convert_units=convert_units, convert_names=convert_names, cached_data=cached_data) + + self.processed = False + return self.loaded + + def __load_wrapup__(self, max_amount, convert_units=True, convert_names=True, cached_data=None): + if self.data is not None: + if not self.data.empty: + if max_amount is not None: + # TODO Dirty workaround + logger.info(f'Trimming dataframe to {max_amount} rows') + self.data=self.data.dropna(axis = 0, how='all').head(max_amount) + # Convert names + if convert_names: + self.__convert_names__() + # Convert units + if convert_units: + self.__convert_units__() + self.postprocessing_updated = False + else: + logger.warning('Empty dataframe in data') + + if not cached_data.empty: + self.data = self.data.combine_first(cached_data) + + return not self.data.empty + + def __convert_names__(self): + logger.info('Converting names...') + + self.data.rename(columns=self._rename, inplace=True) + logger.info('Names converted') + + def __convert_units__(self): + ''' + Convert the units based on the UNIT_LUT and blueprint + NB: what is read/written from/to the cache is not converted. + The files are with original units, and then converted in the device only + for the data but never chached like so. + ''' + logger.info('Checking if units need to be converted...') + for sensor in self.data.columns: + _rename_inv = {v: k for k, v in self._rename.items()} + if sensor not in _rename_inv: + logger.info(f'Sensor {sensor} not renamed. Units check not needed') + continue + sensorm = find_by_field(self._sensors, _rename_inv[sensor], 'name') + + if sensorm is not None: + factor = get_units_convf(sensor, from_units = sensorm.unit) + + if factor != 1: + self.data.rename(columns={sensor: sensor + '_in_' + sensorm.unit}, inplace=True) + self.data.loc[:, sensor] = self.data.loc[:, sensor + '_in_' + sensorm.unit]*factor + else: + logger.info(f'No units conversion needed for sensor {sensor} (factor=1)') + else: + logger.warning('Sensor not found') + logger.info('Units check done') + + def __check_callable__(self, module, function): + # Check if the metric contains a custom module + lazy_name = f"{module}.{function}" + + try: + funct = LazyCallable(lazy_name) + except ModuleNotFoundError: + logger.error(f'Callable {lazy_name} not available') + return False + else: + return True + return False + + def process(self, only_new=False, lmetrics=None): + ''' + Processes devices metrics, either added by the blueprint definition + or the addition using Device.add_metric(). See help(Device.add_metric) for + more information about the definition of the metrics to be added. 
+ + Parameters + ---------- + only_new: boolean + False + To process or not the existing channels in the Device.data that are + defined in Device.metrics + lmetrics: list + None + List of metrics to process. If none, processes all + Returns + ---------- + boolean + True if processed ok, False otherwise + ''' + + if not self.loaded: + logger.error('Need to load first (device.load())') + return False + + process_ok = True + self.postprocessing_updated = False + + if 'metrics' not in vars(self): + logger.warning(f'Device {self.params.id} has nothing to process. Skipping') + return process_ok + + logger.info('---------------------------') + logger.info(f'Processing device {self.params.id}') + if lmetrics is None: + _lmetrics = [metric.name for metric in self.metrics] + else: _lmetrics = lmetrics + + if not _lmetrics: + logger.warning('Nothing to process') + return process_ok + + for metric in self.metrics: + logger.info('---') + if metric.name not in _lmetrics: continue + logger.info(f'Processing {metric.name}') + + if only_new and metric.name in self.data: + logger.info(f'Skipping. Already in device') + continue + + if self.__check_callable__(metric.module, metric.function): + funct = LazyCallable(f"{metric.module}.{metric.function}") + else: + process_ok &= False + logger.error('Problem adding lazy callable to metrics list') + continue + + args, kwargs = list(), dict() + if 'args' in vars(metric): + if metric.args is not None: args = metric.args + if 'kwargs' in vars(metric): + if metric.kwargs is not None: kwargs = metric.kwargs + + try: + result = funct(self.data, *args, **kwargs) + except KeyError: + logger.error('Cannot process requested function with data provided') + process_ok = False + pass + else: + if result is not None: + self.data[metric.name] = result + process_ok &= True + # If the metric is None, might be for many reasons and shouldn't collapse the process_ok + + if process_ok: + logger.info(f"Device {self.params.id} processed") + self.processed = process_ok & self.update_postprocessing_date() + + return self.processed + + @property + def sensors(self): + return self._sensors + + def update_postprocessing_date(self): + latest_postprocessing = localise_date(self.data.index[-1]+\ + to_timedelta(self.options.frequency), 'UTC') + if self.handler.update_latest_postprocessing(latest_postprocessing): + if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing: + self.postprocessing_updated = True + else: + self.postprocessing_updated = False + return self.postprocessing_updated + + # TODO + def health_check(self): + return True + + # TODO - Decide if we keep it + # def forward(self, chunk_size = 500, dry_run = False, max_retries = 2): + # ''' + # Forwards data to another api. 
+ # Parameters + # ---------- + # chunk_size: int + # 500 + # Chunk size to be sent to device.post_data_to_device in question + # dry_run: boolean + # False + # Post the payload to the API or just return it + # max_retries: int + # 2 + # Maximum number of retries per chunk + # Returns + # ---------- + # boolean + # True if posted ok, False otherwise + # ''' + + # # Import requested handler + # hmod = __import__('scdata.io.device_api', fromlist = ['io.device_api']) + # Hclass = getattr(hmod, config.connectors[self.forwarding_request]['handler']) + + # # Create new device in target API if it hasn't been created yet + # if self.forwarding_params is None: + # std_out('Empty forwarding information, attemping creating a new device', 'WARNING') + # # We assume the device has never been posted + # # Construct new device kwargs dictionary + # kwargs = dict() + # for item in config.connectors[self.forwarding_request]['kwargs']: + # val = config.connectors[self.forwarding_request]['kwargs'][item] + # if val == 'options': + # kitem = self.options[item] + # elif val == 'config': + # # Items in config should be underscored + # kitem = config.__getattr__(f'_{item}') + # elif isinstance(val, Iterable): + # if 'same' in val: + # if 'as_device' in val: + # if item == 'sensors': + # kitem = self.merge_sensor_metrics(ignore_empty = True) + # elif item == 'description': + # kitem = self.blueprint.replace('_', ' ') + # elif 'as_api' in val: + # if item == 'sensors': + # kitem = self.api_device.get_device_sensors() + # elif item == 'description': + # kitem = self.api_device.get_device_description() + # else: + # kitem = val + # kwargs[item] = kitem + + # response = Hclass.new_device(name = config.connectors[self.forwarding_request]['name_prepend']\ + # + str(self.params.id), + # location = self.location, + # dry_run = dry_run, + # **kwargs) + # if response: + # if 'message' in response: + # if response['message'] == 'Created': + # if 'sensorid' in response: + # self.forwarding_params = response['sensorid'] + # self.api_device.postprocessing['forwarding_params'] = self.forwarding_params + # std_out(f'New sensor ID in {self.forwarding_request}\ + # is {self.forwarding_params}. Updating') + + # if self.forwarding_params is not None: + # df = self.data.copy() + # df = df[df.columns.intersection(list(self.merge_sensor_metrics(ignore_empty=True).keys()))] + # df = clean(df, 'drop', how = 'all') + + # if df.empty: + # std_out('Empty dataframe, ignoring', 'WARNING') + # return False + + # # Create object + # ndev = Hclass(did = self.forwarding_params) + # post_ok = ndev.post_data_to_device(df, chunk_size = chunk_size, + # dry_run = dry_run, max_retries = 2) + + # if post_ok: + # # TODO Check if we like this + # if self.source == 'api': + # self.update_latest_postprocessing() + # std_out(f'Posted data for {self.params.id}', 'SUCCESS') + # else: + # std_out(f'Error posting data for {self.params.id}', 'ERROR') + # return post_ok + + # else: + # std_out('Empty forwarding information', 'ERROR') + # return False + + def export(self, path, forced_overwrite = False, file_format = 'csv'): + ''' + Exports Device.data to file + Parameters + ---------- + path: String + Path to export file to, does not include filename. + The filename will be the Device.id property + forced_overwrite: boolean + False + Force data export in case of already existing file + file_format: String + 'csv' + File format to export. 
Current supported format CSV + Returns + --------- + True if exported ok, False otherwise + ''' + # Export device + if self.data is None: + logger.error('Cannot export null data') + return False + if file_format == 'csv': + return export_csv_file(path, str(self.params.id), self.data, forced_overwrite = forced_overwrite) + else: + # TODO Make a list of supported formats + return NotImplementedError (f'Not supported format. Formats: [csv]') + + async def post(self, columns = 'sensors', clean_na = 'drop', chunk_size = 500,\ + dry_run = False, max_retries = 2, with_postprocessing = False): + ''' + Posts data via handler post method. + Parameters + ---------- + columns: str, optional + '' + 'metrics' or 'sensors'. Empty '' means 'all' + clean_na: string, optional + 'drop' + 'drop', 'fill' + chunk_size: integer + chunk size to split resulting pandas DataFrame for posting data + dry_run: boolean + False + Post the payload to the API or just return it + max_retries: int + 2 + Maximum number of retries per chunk + with_postprocessing: boolean + False + Update postprocessing information + Returns + ---------- + boolean + True if posted ok, False otherwise + ''' + + post_ok = await self.handler.post_data(columns=columns, \ + rename = self._rename, clean_na = clean_na, chunk_size = chunk_size, \ + dry_run = dry_run, max_retries = max_retries) + + if post_ok: logger.info(f'Posted data for {self.params.id}') + else: logger.error(f'Error posting data for {self.params.id}') + + # Post info if requested. It should be updated elsewhere + if with_postprocessing and post_ok and not dry_run: + post_ok &= self.update_postprocessing(dry_run=dry_run) + + return post_ok + + def update_postprocessing(self, dry_run = False): + ''' + Posts device postprocessing. Only available for parent of ScApiDevice + Parameters + ---------- + dry_run: boolean + False + Post the payload to the API or just return it + Returns + ---------- + boolean + True if posted ok, False otherwise + ''' + if not self.postprocessing_updated: + logger.info(f'Postprocessing is not up to date') + return False + + post_ok = self.handler.patch_postprocessing(dry_run=dry_run) + + if post_ok: logger.info(f"Postprocessing posted for device {self.params.id}") + return post_ok From deab14b46ef473d5ec6c2e0312f09d7d6c2a7e1f Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:59:02 +0200 Subject: [PATCH 23/72] Remove user api --- scdata/io/user_api.py | 69 ------------------------------------------- 1 file changed, 69 deletions(-) delete mode 100644 scdata/io/user_api.py diff --git a/scdata/io/user_api.py b/scdata/io/user_api.py deleted file mode 100644 index 2dfb9f58..00000000 --- a/scdata/io/user_api.py +++ /dev/null @@ -1,69 +0,0 @@ -from requests import get -from scdata.utils import std_out, localise_date -from os import environ - -from pandas import DataFrame - -class ScApiUser: - - API_BASE_URL = 'https://api.smartcitizen.me/v0/users/' - headers = {'Authorization':'Bearer ' + environ['SC_BEARER'], 'Content-type': 'application/json'} - - def __init__ (self, did = None, username = None): - self.id = did - self.username = username - self.devices = None - self.userjson = None - self.location = None - self.url = None - self.joined_at = None - self.updated_at = None - - def get_user_info(self): - if self.id is None and self.username is None: - std_out('Need at lease username or user id to make a valid request') - - if self.id is not None: self.get_user_json_by_id() - if self.username is not None: self.get_user_json_by_username() - - try: - 
self.devices = self.userjson['devices'] - self.location = self.userjson['location'] - - self.joined_at = self.userjson['joined_at'] - self.updated_at = self.userjson['updated_at'] - except: - std_out('Problem while getting user info', 'ERROR') - pass - else: - return True - - return False - - def get_user_json_by_id(self): - if self.userjson is None: - try: - userR = get(self.API_BASE_URL + '{}/'.format(self.id), headers = self.headers) - print (userR) - if userR.status_code == 200 or userR.status_code == 201: - self.userjson = userR.json() - else: - std_out('API reported {}'.format(userR.status_code), 'ERROR') - except: - std_out('Failed request. Probably no connection', 'ERROR') - pass - return self.userjson - - def get_user_json_by_username(self): - if self.userjson is None: - try: - userR = get(self.API_BASE_URL + '{}/'.format(self.username), headers = self.headers) - print (userR) - if userR.status_code == 200 or userR.status_code == 201: - self.userjson = userR.json() - else: - std_out('API reported {}'.format(userR.status_code), 'ERROR') - except: - std_out('Failed request. Probably no connection', 'ERROR') - pass - return self.userjson \ No newline at end of file From add8817a64db40dde5ce49c71d63bc5dfbdd7ed9 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:59:17 +0200 Subject: [PATCH 24/72] Remove firmware names --- scdata/io/firmware.py | 72 ------------------------------------------- 1 file changed, 72 deletions(-) delete mode 100644 scdata/io/firmware.py diff --git a/scdata/io/firmware.py b/scdata/io/firmware.py deleted file mode 100644 index 364167b9..00000000 --- a/scdata/io/firmware.py +++ /dev/null @@ -1,72 +0,0 @@ -from os.path import join -from urllib.request import urlopen -from scdata.utils import std_out -from scdata._config import config -import json -from re import sub -from traceback import print_exc - - -def get_firmware_names(sensorsh, json_path, file_name): - - # Directory - names_dict = join(json_path, file_name + '.json') - - if config.reload_firmware_names: - try: - # Read only 20000 chars - data = urlopen(sensorsh).read(20000).decode('utf-8') - # split it into lines - data = data.split('\n') - sensor_names = dict() - line_sensors = len(data) - for line in data: - if 'class AllSensors' in line: - line_sensors = data.index(line) - if data.index(line) > line_sensors: - - if 'OneSensor' in line and '{' in line and '}' in line and '/*' not in line: - # Split commas - line_tokenized = line.strip('').split(',') - - # Elimminate unnecessary elements - line_tokenized_sublist = list() - for item in line_tokenized: - item = sub('\t', '', item) - item = sub('OneSensor', '', item) - item = sub('{', '', item) - item = sub('}', '', item) - item = sub('"', '', item) - if item != '' and item != ' ': - while item[0] == ' ' and len(item)>0: item = item[1:] - line_tokenized_sublist.append(item) - line_tokenized_sublist = line_tokenized_sublist[:-1] - - if len(line_tokenized_sublist) > 2: - shortTitle = sub(' ', '', line_tokenized_sublist[3]) - if len(line_tokenized_sublist)>9: - sensor_names[shortTitle] = dict() - sensor_names[shortTitle]['SensorLocation'] = sub(' ', '', line_tokenized_sublist[0]) - sensor_names[shortTitle]['id'] = sub(' ','', line_tokenized_sublist[5]) - sensor_names[shortTitle]['title'] = line_tokenized_sublist[4] - sensor_names[shortTitle]['unit'] = line_tokenized_sublist[-1] - # Save everything to the most recent one - with open(names_dict, 'w') as fp: - json.dump(sensor_names, fp) - std_out('Saved updated sensor names and dumped into ' + 
names_dict, 'SUCCESS') - - except: - # Load sensors - print_exc() - with open(names_dict) as handle: - sensor_names = json.loads(handle.read()) - std_out('Error. Retrieving local version for sensors names', 'WARNING') - - else: - std_out('Retrieving local version for sensors names') - with open(names_dict) as handle: - sensor_names = json.loads(handle.read()) - if sensor_names is not None: std_out('Local version of sensor names loaded', 'SUCCESS') - - return sensor_names - From 46f8791efecf515091fa7396b58f34a1f611e68d Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:59:34 +0200 Subject: [PATCH 25/72] Add pydantic models --- scdata/models/__init__.py | 1 + scdata/models/models.py | 60 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 scdata/models/__init__.py create mode 100644 scdata/models/models.py diff --git a/scdata/models/__init__.py b/scdata/models/__init__.py new file mode 100644 index 00000000..8c45e6a7 --- /dev/null +++ b/scdata/models/__init__.py @@ -0,0 +1 @@ +from .models import TestOptions, DeviceOptions, Metric, Name, Blueprint, Source, Sensor, FileParams, APIParams \ No newline at end of file diff --git a/scdata/models/models.py b/scdata/models/models.py new file mode 100644 index 00000000..2f4da6d9 --- /dev/null +++ b/scdata/models/models.py @@ -0,0 +1,60 @@ +from pydantic import BaseModel +from typing import Optional, List +from datetime import datetime + +class TestOptions(BaseModel): + cache: Optional[bool] = False + +class Metric(BaseModel): + id: Optional[int] = None + name: str + description: Optional[str] = '' + module: Optional[str] = "scdata.device.process" + function: str + unit: Optional[str] = '' + post: Optional[bool] = False + args: Optional[dict] = None + kwargs: Optional[dict] = None + +class Sensor(BaseModel): + id: int + name: str + description: str + unit: Optional[str] = None + +class Source(BaseModel): + type: str = 'api' + module: str = 'smartcitizen_connector' + handler: str = 'SCDevice' + +class APIParams(BaseModel): + id: int + +class FileParams(BaseModel): + id: str # Compatible with API id + header_skip: Optional[List[int]] = [] + index: Optional[str] = 'TIME' + separator: Optional[str] = ',' + tzaware: Optional[bool] = True + timezone: Optional[str] = "UTC" + processed: Optional[str] = None + raw: Optional[str] = None + +class DeviceOptions(BaseModel): + clean_na: Optional[bool] = None + frequency: Optional[str] = '1Min' + resample: Optional[bool] = False + min_date: Optional[str] = None + max_date: Optional[str] = None + +class Blueprint(BaseModel): + meta: dict = dict() + metrics: List[Metric] = [] + source: Source = Source() + +class Name(BaseModel): + id: int + name: str + description: str + unit: str + From cdd52bb3be2e6ef1ed514c7c15f60ead1bfb2c1a Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 18:59:45 +0200 Subject: [PATCH 26/72] Move test to file --- scdata/test/__init__.py | 444 +--------------------------------------- scdata/test/test.py | 370 +++++++++++++++++++++++++++++++++ 2 files changed, 371 insertions(+), 443 deletions(-) create mode 100644 scdata/test/test.py diff --git a/scdata/test/__init__.py b/scdata/test/__init__.py index 28ddb5bd..ae15355c 100644 --- a/scdata/test/__init__.py +++ b/scdata/test/__init__.py @@ -1,443 +1 @@ -""" Main implementation of the class Test """ - -from os import makedirs -from os.path import join, exists -from shutil import copyfile, rmtree, make_archive -from traceback import print_exc -from datetime import datetime -import 
yaml -import json -import folium -from re import sub - -from scdata.utils import std_out, get_tests_log -from scdata.io import read_csv_file -from scdata._config import config -from scdata.device import Device -from scdata.test.plot.plot_tools import to_png_b64 - -class Test(object): - - from .plot import (ts_plot, ts_iplot, device_metric_map, path_plot, - scatter_plot, scatter_iplot, ts_scatter, - heatmap_plot, heatmap_iplot, - box_plot, ts_dendrogram, - ts_dispersion_plot, ts_dispersion_grid, - scatter_dispersion_grid) - #, report_plot, cat_plot, violin_plot) - - if config._ipython_avail: - from .plot import ts_uplot, ts_dispersion_uplot - from .export import to_csv, to_html - from .load import load - from .utils import (combine, prepare, dispersion_analysis, - dispersion_summary, get_common_channels, - gaps_check) - - def __init__(self, name): - - self.options = { - 'cached_data_margin': config.data['cached_data_margin'], - 'load_cached_api': config.data['load_cached_api'], - 'store_cached_api': config.data['store_cached_api'], - 'clean_na': config.data['clean_na'] - } - - if self.__check_tname__(name): self.__set_tname__(name) - - self.details = dict() - self.devices = dict() - self.descriptor = {'id': self.full_name} - - self._default_fields = { - 'id': '', - 'comment': '', - 'notes': '', - 'project': '', - 'author': '', - 'commit': '', - 'devices': dict(), - 'report': '', - 'type_test': '' - } - - # Dict for report - self.content = dict() - - # Dispersion analysis - self.dispersion_df = None - self._dispersion_summary = None - self.common_channels = None - - def __str__(self): - return self.full_name - - - @property - def default_fields(self): - return self._default_fields - - def __set_options__(self, options): - - test_options = [ - 'load_cached_api', - 'store_cached_api', - 'clean_na', - 'frequency', - 'min_date', - 'max_date', - 'resample' - ] - - for option in test_options: - if option in options.keys(): - self.options[option] = options[option] - - def __set_tname__(self, name): - current_date = datetime.now() - self.full_name = f'{current_date.year}_{str(current_date.month).zfill(2)}_{name}' - self.path = join(config.paths['processed'], str(current_date.year), \ - str(current_date.month).zfill(2), self.full_name) - std_out (f'Full Name: {self.full_name}') - - def __check_tname__(self, name): - test_log = get_tests_log() - test_logn = list(test_log.keys()) - - if not any([name in tlog for tlog in test_logn]): - return name - else: - undef_test = True - - while undef_test: - - # Wait for input - poss_names = list() - std_out ('Possible tests found', force = True) - for ctest in test_logn: - if name in ctest: - poss_names.append(test_logn.index(ctest) + 1) - std_out (str(test_logn.index(ctest) + 1) + ' --- ' + ctest, force = True) - std_out ('// --- \\\\', force = True) - if len(poss_names) == 1: - which_test = str(poss_names[0]) - else: - which_test = input('Similar tests found, please select one or input other name [New]: ') - - if which_test == 'New': - new_name = input('Enter new name: ') - break - elif which_test.isdigit(): - if int(which_test) in poss_names: - self.full_name = test_logn[int(which_test)-1] - self.path = test_log[self.full_name]['path'] - std_out(f'Test full name, {self.full_name}', force = True) - return False - else: - std_out("Type 'New' for other name, or test number in possible tests", 'ERROR') - else: - std_out("Type 'New' for other name, or test number", 'ERROR') - if self.__check_tname__(new_name): self.__set_tname__(new_name) - - def create(self, 
force = False): - # Create folder structure under data subdir - if not exists(self.path): - std_out('Creating new test') - makedirs(self.path) - else: - if not force: - std_out (f'Test already exists with this name. Full name: {self.full_name}. Maybe force = True?', 'ERROR') - return None - else: - std_out (f'Overwriting test. Full name: {self.full_name}') - - self.__update_descriptor__() - self.__preprocess__() - - std_out (f'Test creation finished. Name: {self.full_name}', 'SUCCESS') - return self.full_name - - def purge(self): - # Check if the folder structure exists - if not exists(self.path): - std_out('Test folder doesnt exist', 'ERROR') - else: - std_out (f'Purging cached directory in: {self.path}') - try: - rmtree(join(self.path, 'cached')) - except: - std_out('Error while purging directory', 'ERROR') - pass - else: - std_out (f'Purged cached folder', 'SUCCESS') - return True - return False - - def add_details(self, details): - ''' - details: a dict containing the information about the test. Minimum of: - - project - - commit - - author - - test_type - - report - - comment - ''' - - for detail in details.keys(): self.details[detail] = details[detail] - - def add_devices_list(self, devices_list, blueprint): - ''' - Convenience method to add devices from a list of api devices with a certain blueprint - Params: - devices_list: list - Contains devices ids (str or int) - - blueprint: String - Blueprint name - ''' - if blueprint is None: return False - - if blueprint not in config.blueprints.keys(): - std_out(f'Blueprint {blueprint} not in blueprints', 'ERROR') - return False - - for device in devices_list: - self.add_device(Device(blueprint = blueprint , descriptor = {'source': 'api', - 'id': str(device) - } - ) - ) - return True - - def add_device(self, device): - ''' - Adds a device to the test. The device has to be an instance of 'scdata.device.Device' - ''' - if device.id not in self.devices.keys(): - self.devices[device.id] = device - return True - else: - std_out(f'Device {device.id} is duplicated', 'WARNING') - return False - - def add_content(self, title, figure = None, text = None, iframe = None, show_title = True, force = False): - ''' - Adds content for the rendered flask template of the test. Content is a dict() - which contains a key per title (replacing ' ' with '_') and the content in it. - - Parameters - ---------- - title - None - Content title. Needs to not be None - figure - None - matplotlib or similar figure that can be converted to base64 - text - None - Text to be converted to
<p> html tag with additional inner html (jinja2 safe rendered)
iframe
None
HTML iframe containing anything
show_title
True
show title in HTML
<h3>
tag - force - If already added content with this title - - Returns - ---------- - True if content added, false otherwise - - ''' - - title_cor = sub('\W|^(?=\d)','_', title) - - if title_cor not in self.content or force: - self.content[title_cor] = dict() - - if title is not None: - self.content[title_cor]['title'] = title - if figure is not None: - self.content[title_cor]['image'] = to_png_b64(figure) - if text is not None: - self.content[title_cor]['text'] = text - if iframe is not None: - self.content[title_cor]['iframe'] = iframe - - self.content[title_cor]['show_title'] = show_title - - std_out('Item added', 'SUCCESS') - return True - - else: - std_out('Item not added as its already in content', 'ERROR') - return False - - def process(self, only_new = False): - ''' - Calculates all the metrics in each of the devices - Returns True if done OK - ''' - process_ok = True - for device in self.devices: process_ok &= self.devices[device].process(only_new = only_new) - - # Cosmetic output - if process_ok: std_out(f'Test {self.full_name} processed', 'SUCCESS') - else: std_out(f'Test {self.full_name} not processed', 'ERROR') - - return process_ok - - def __preprocess__(self): - ''' - Processes the files for one test, given that the devices and details have been added - ''' - - std_out('Processing files') - - def get_raw_files(): - list_raw_files = [] - for device in self.devices.keys(): - if self.devices[device].source == 'csv': - list_raw_files.append(self.devices[device].raw_data_file) - - return list_raw_files - - def copy_raw_files(_raw_src_path, _raw_dst_path, _list_raw_files): - try: - - for item in _list_raw_files: - s = join(_raw_src_path, item) - d = join(_raw_dst_path, item.split('/')[-1]) - copyfile(s, d) - - std_out('Copy raw files: OK', 'SUCCESS') - - return True - - except: - std_out('Problem copying raw files', 'ERROR') - print_exc() - return False - - def date_parser(s, a): - return parser.parse(s).replace(microsecond=int(a[-3:])*1000) - - # Define paths - raw_src_path = join(config.paths['data'], 'raw') - raw_dst_path = join(self.path, 'raw') - - # Create path - if not exists(raw_dst_path): makedirs(raw_dst_path) - - # Get raw files - list_raw_files = get_raw_files() - - # Copy raw files and process data - if len(list_raw_files): - if copy_raw_files(raw_src_path, raw_dst_path, list_raw_files): - - # Process devices - for device_name in self.devices.keys(): - - device = self.devices[device_name] - - if device.source == 'csv': - - std_out ('Processing csv from device {}'.format(device.id)) - src_path = join(raw_src_path, device.raw_data_file) - dst_path = join(self.path, device.processed_data_file) - - # Load csv file, only localising and removing - df = read_csv_file(file_path = src_path, - timezone = device.timezone, - frequency = device.frequency, - clean_na = None, - index_name = device.sources[device.source]['index'], - skiprows = device.sources[device.source]['header_skip'], - sep = device.sources[device.source]['sep'], - tzaware = device.sources[device.source]['tz-aware'], - resample = device.resample - ) - df.index.rename(config._csv_defaults['index_name'], inplace=True) - df.to_csv(dst_path, sep=config._csv_defaults['sep']) - - std_out('Files preprocessed') - std_out(f'Test {self.full_name} path: {self.path}') - - def __update_descriptor__(self): - if self.descriptor == {}: self.std_out('No descriptor file to update') - - for field in self._default_fields: - if field not in self.descriptor.keys(): self.descriptor[field] = self._default_fields[field] - - # Add details to 
descriptor, or update them if there is anything in details - for detail in self.details.keys(): self.descriptor[detail] = self.details[detail] - # Add devices to descriptor - for device_name in self.devices.keys(): - - device = self.devices[device_name] - - if device.source == 'csv': - device.processed_data_file = self.full_name + '_' + str(device.id) + '.csv' - - dvars = vars(device).copy() - for discvar in config._discvars: - if discvar in dvars: - dvars.pop(discvar) - - self.descriptor['devices'][device.id] = dvars - - # Create yaml with test description - with open(join(self.path, 'test_description.yaml'), 'w') as yaml_file: - yaml.dump(self.descriptor, yaml_file) - - std_out ('Descriptor file updated') - - def compress(self, cformat = 'zip', selection = 'full'): - ''' - Compress the test folder (or selected folder) into a defined - format in the test.path directory - - Parameters - ---------- - cformat - 'zip' - String. Valid shutil.make_archive input: 'zip', 'tar', - 'gztar', 'bztar', 'xztar' - selection - 'full' - String. Selection of folders to compress. Either 'full', - 'cached' or 'raw'. If 'full', compresses the whole test, - including test_description.yaml - - Returns - ---------- - True if all good, False otherwise - - ''' - - - if cformat not in ['zip', 'tar', 'gztar', 'bztar', 'xztar']: - std_out('Invalid format', 'ERROR') - return False - - if selection not in ['full', 'cached', 'raw']: - std_out('Invalid selection (valid options: full, cached, raw', 'ERROR') - return False - - if selection == 'full': - _root_dir = self.path - elif selection == 'cached': - _root_dir = join(self.path, 'cached') - elif selection == 'raw': - _root_dir = join(self.path, 'raw') - - fname_t = join(self.path.strip(f'{self.full_name}')[:-1], self.full_name + f'_{selection}') - make_archive(fname_t, cformat, root_dir=_root_dir) - - fname = fname_t + '.' 
+ cformat - if not exists(fname): return False - - return fname +from .test import Test \ No newline at end of file diff --git a/scdata/test/test.py b/scdata/test/test.py new file mode 100644 index 00000000..ef2f8fb8 --- /dev/null +++ b/scdata/test/test.py @@ -0,0 +1,370 @@ +""" Main implementation of the class Test """ + +from os import makedirs +from os.path import join, exists +from shutil import copyfile, rmtree, make_archive +from traceback import print_exc +from datetime import datetime, timedelta +import yaml +import json +import folium +import asyncio +from re import sub +from pydantic import TypeAdapter, BaseModel, ConfigDict, model_serializer +from typing import Optional, List, Dict, Any + +from scdata.utils import logger, localise_date, find_by_field +from scdata.io import read_csv_file, export_csv_file +from scdata._config import config +from scdata.device import Device +from scdata.models import TestOptions#, TestResult + +class Test(BaseModel): + + from .plot import (ts_plot, ts_iplot, device_metric_map, path_plot, + scatter_plot, scatter_iplot, ts_scatter, + heatmap_plot, heatmap_iplot, + box_plot, ts_dendrogram, + ts_dispersion_plot, ts_dispersion_grid, + scatter_dispersion_grid) + #, report_plot, cat_plot, violin_plot) + + if config._ipython_avail: + from .plot import ts_uplot, ts_dispersion_uplot + from .export import to_csv, to_html + from .tools import combine, prepare, history + from .dispersion import dispersion_analysis, dispersion_summary + from .checks import get_common_channels, gaps_check + + model_config = ConfigDict(arbitrary_types_allowed = True) + name: str + path: str = '' + devices: List[Device] = [] + options: TestOptions = TestOptions() + # TODO - Define test types based on enum + # dev + # deployment... + type: str = 'dev' + new: bool = False + loaded: bool = False + force_recreate: bool = False + # results: List[TestResult] = [] + + def model_post_init(self, __context) -> None: + + if self.__check_tname__(self.name): + self.__set_tname__(self.name) + + if self.new or self.force_recreate: + logger.info('New test') + self.create() + else: + with open(join(self.path, 'test.json'), 'r') as file: + tj = json.load(file) + + self.devices = TypeAdapter(List[Device]).validate_python(tj['devices']) + self.options = TypeAdapter(Options).validate_python(tj['options']) + self.type = tj['meta']['type'] + if self.name != tj['meta']['name']: + raise ValueError('Name not matching') + + if self.path != tj['meta']['path']: + raise ValueError('Path not matching') + + # TODO + # Dispersion analysis + # self.dispersion_df = None + # self._dispersion_summary = None + # self.common_channels = None + + def __str__(self): + return self.__full_name__ + + def __set_tname__(self, name): + current_date = datetime.now() + self.name = f'{current_date.year}_{str(current_date.month).zfill(2)}_{name}' + self.path = join(config.paths['processed'], str(current_date.year), \ + str(current_date.month).zfill(2), self.name) + + logger.info (f'Full Name: {self.name}') + + def __check_tname__(self, name): + test_log = self.history() + test_logn = list(test_log.keys()) + + if not any([name in tlog for tlog in test_logn]): + logger.info ('Test is new') + self.new = True + return name + else: + self.new = False + undef_test = True + while undef_test: + # Wait for input + possible_names = list() + logger.info ('Possible tests found:') + for ctest in test_logn: + if name in ctest: + possible_names.append(test_logn.index(ctest) + 1) + logger.info (str(test_logn.index(ctest) + 1) + ' --- ' + ctest) + 
                logger.info('// --- \\\\')
                if len(possible_names) == 1:
                    which_test = str(possible_names[0])
                else:
                    which_test = input('Similar tests found, please select one or input other name [New]: ')

                if which_test == 'New':
                    new_name = input('Enter new name: ')
                    break
                elif which_test.isdigit():
                    if int(which_test) in possible_names:
                        self.name = test_logn[int(which_test)-1]
                        self.path = test_log[self.name]['path']
                        logger.info(f'Test full name, {self.name}')
                        return False
                    else:
                        logger.error("Type 'New' for other name, or test number in possible tests")
                else:
                    logger.error("Type 'New' for other name, or test number")

            if self.__check_tname__(new_name):
                self.__set_tname__(new_name)

    def create(self):
        # Create folder structure under data subdir
        if not exists(self.path):
            logger.info('Creating new test')
            makedirs(self.path)
        else:
            if not self.force_recreate:
                logger.error(f'Test already exists with this name. \
                    Full name: {self.name}. Maybe force_recreate = True?')
                return None
            else:
                logger.info(f'Overwriting test. Full name: {self.name}')

        self.__preprocess__()
        self.__dump__()

        logger.info(f'Test creation finished. Name: {self.name}')
        return self.name

    def purge(self):
        # Check if the folder structure exists
        if not exists(self.path):
            logger.error("Test folder doesn't exist")
        else:
            logger.info(f'Purging cached directory in: {self.path}')
            try:
                rmtree(join(self.path, 'cached'))
            except:
                logger.error('Error while purging directory')
                pass
            else:
                logger.info('Purged cached folder')
                return True
        return False

    def get_device(self, device_id):
        did = find_by_field(self.devices, device_id, 'id')
        if did is None:
            logger.error(f'Device {device_id} is not in test')
        return did

    # TODO - Do we want this with asyncio?
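    # A minimal sketch of an asyncio variant, assuming Device.process() stays
    # synchronous and is offloaded to the default executor. 'process_async'
    # and the functools.partial import are illustrative assumptions, not part
    # of the current API (asyncio is already imported above):
    #
    # from functools import partial
    #
    # async def process_async(self, only_new = False):
    #     loop = asyncio.get_running_loop()
    #     results = await asyncio.gather(*[
    #         loop.run_in_executor(None, partial(device.process, only_new=only_new))
    #         for device in self.devices])
    #     return all(results)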
+ def process(self, only_new = False): + ''' + Calculates all the metrics in each of the devices + Returns True if done OK + ''' + process_ok = True + for device in self.devices: + process_ok &= device.process(only_new = only_new) + + # Cosmetic output + if process_ok: logger.info(f'Test {self.name} processed') + else: logger.error(f'Test {self.name} not processed') + + return process_ok + + # TODO - CHECK FOR CSV FILES + def __preprocess__(self): + ''' + Processes the files for one test, given that the devices and details have been added + ''' + + logger.info('Processing files...') + def get_raw_files(): + list_raw_files = [] + for device in self.devices: + if device.source.type == 'csv': + list_raw_files.append(device.source.files.raw_data_file) + + return list_raw_files + + def copy_raw_files(_raw_src_path, _raw_dst_path, _list_raw_files): + try: + + for item in _list_raw_files: + s = join(_raw_src_path, item) + d = join(_raw_dst_path, item.split('/')[-1]) + copyfile(s, d) + + logger.info('Copy raw files: OK') + + return True + + except: + logger.error('Problem copying raw files') + print_exc() + return False + + def date_parser(s, a): + return parser.parse(s).replace(microsecond=int(a[-3:])*1000) + + # Define paths + raw_src_path = join(config.paths['data'], 'raw') + raw_dst_path = join(self.path, 'raw') + + # Create path + if not exists(raw_dst_path): makedirs(raw_dst_path) + + # Get raw files + list_raw_files = get_raw_files() + + # Copy raw files and process data + if len(list_raw_files): + if copy_raw_files(raw_src_path, raw_dst_path, list_raw_files): + + # Process devices + for device in self.devices: + if device.source.type == 'csv': + + logger.info (f'Processing csv from device {device.id}...') + src_path = join(raw_src_path, device.raw_data_file) + dst_path = join(self.path, device.processed_data_file) + + # Load csv file, only localising and removing + df = read_csv_file(file_path = src_path, + timezone = device.timezone, + frequency = device.frequency, + clean_na = None, + index_name = device.sources[device.source]['index'], + skiprows = device.sources[device.source]['header_skip'], + sep = device.sources[device.source]['sep'], + tzaware = device.sources[device.source]['tz-aware'], + resample = device.resample + ) + df.index.rename(config._csv_defaults['index_name'], inplace=True) + df.to_csv(dst_path, sep=config._csv_defaults['sep']) + + logger.info('Files preprocessed') + logger.info(f'Test {self.name} path: {self.path}') + + @model_serializer + def ser_model(self) -> Dict[str, Any]: + + return { + 'meta': { + 'name': self.name, + 'path': self.path, + 'type': self.type + }, + 'options': self.options.model_dump(), + 'devices': [{'params': device.params.model_dump(), + 'metrics': [metric.model_dump() for metric in device.metrics], + 'source': device.source.model_dump(), + 'blueprint': device.blueprint} + for device in self.devices] + } + + def __dump__(self): + with open(join(self.path, 'test.json'), 'w') as file: + json.dump(self.ser_model(), file, indent=4) + + def compress(self, cformat = 'zip', selection = 'full'): + ''' + Compress the test folder (or selected folder) into a defined + format in the test.path directory + + Parameters + ---------- + cformat + 'zip' + String. Valid shutil.make_archive input: 'zip', 'tar', + 'gztar', 'bztar', 'xztar' + selection + 'full' + String. Selection of folders to compress. Either 'full', + 'cached' or 'raw'. 
+    def compress(self, cformat = 'zip', selection = 'full'):
+        '''
+        Compress the test folder (or selected folder) into a defined
+        format in the test.path directory
+
+        Parameters
+        ----------
+            cformat
+                'zip'
+                String. Valid shutil.make_archive input: 'zip', 'tar',
+                'gztar', 'bztar', 'xztar'
+            selection
+                'full'
+                String. Selection of folders to compress. Either 'full',
+                'cached' or 'raw'. If 'full', compresses the whole test,
+                including test.json
+
+        Returns
+        ----------
+            Path of the archive if all good, False otherwise
+        '''
+        if cformat not in ['zip', 'tar', 'gztar', 'bztar', 'xztar']:
+            logger.error('Invalid format')
+            return False
+
+        if selection not in ['full', 'cached', 'raw']:
+            logger.error('Invalid selection (valid options: full, cached, raw)')
+            return False
+
+        if selection == 'full':
+            _root_dir = self.path
+        elif selection == 'cached':
+            _root_dir = join(self.path, 'cached')
+        elif selection == 'raw':
+            _root_dir = join(self.path, 'raw')
+
+        fname_t = join(self.path.strip(f'{self.full_name}')[:-1], self.full_name + f'_{selection}')
+        make_archive(fname_t, cformat, root_dir=_root_dir)
+
+        fname = fname_t + '.' + cformat
+        if not exists(fname): return False
+
+        return fname
+
+    def cache(self):
+        logger.info('Caching files...')
+        for device in self.devices:
+            logger.info(f'Caching files for {device.id}...')
+
+            cached_file_path = join(self.path, 'cached')
+            if not exists(cached_file_path):
+                logger.info('Creating path for exporting cached data')
+                makedirs(cached_file_path)
+
+            if device.export(cached_file_path, forced_overwrite = True, file_format = 'csv'):
+                logger.info(f'Device {device.id} cached')
+
+        return all([exists(join(self.path, 'cached', f'{d.id}.csv')) for d in self.devices])
+
+    async def load(self):
+        '''
+        Loads the test data and the different devices.
+
+        Returns
+        ----------
+            True if all devices are loaded, False otherwise
+        '''
+        logger.info('Loading test...')
+
+        tasks = []
+        semaphore = asyncio.Semaphore(config._max_concurrent_requests)
+
+        for device in self.devices:
+            # Check for cached data
+            cached_file_path = ''
+            if self.options.cache:
+                tentative_path = join(self.path, 'cached', f'{device.id}.csv')
+                if exists(tentative_path): cached_file_path = tentative_path
+            # Append task
+            tasks.append(asyncio.ensure_future(device.load(cache=cached_file_path)))
+        await asyncio.gather(*tasks)
+
+        logger.info('Test load done')
+        if self.options.cache: self.cache()
+
+        return all([d.loaded for d in self.devices])
\ No newline at end of file

From 5a04132ee13c9d810401ac86ab74a4ef56af678e Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 19:00:33 +0200
Subject: [PATCH 27/72] Remove NILU file

---
 tests/devices/test_nilu.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 tests/devices/test_nilu.py

diff --git a/tests/devices/test_nilu.py b/tests/devices/test_nilu.py
deleted file mode 100644
index e69de29b..00000000

From c241bacb74c800b12eaceb721007ebb42d0a26a4 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 19:00:55 +0200
Subject: [PATCH 28/72] Add find by field in utils and remove logs

---
 scdata/utils/__init__.py |  2 +-
 scdata/utils/find.py     | 11 +++++++++++
 scdata/utils/logs.py     | 31 -------------------------------
 3 files changed, 12 insertions(+), 32 deletions(-)
 create mode 100644 scdata/utils/find.py
 delete mode 100644 scdata/utils/logs.py

diff --git a/scdata/utils/__init__.py b/scdata/utils/__init__.py
index 7b92654e..9d027cd4 100644
--- a/scdata/utils/__init__.py
+++ b/scdata/utils/__init__.py
@@ -3,13 +3,13 @@
 from .units import get_units_convf
 from .dictmerge import dict_fmerge
 from .lazy import LazyCallable
-from .logs import get_tests_log
 from .meta import get_current_blueprints, load_blueprints, get_json_from_url, load_names
 from .stats import spearman, get_metrics
 from .cleaning import clean
 from .location import get_elevation
 from .url_check import url_checker
 from .headers import process_headers
+from .find import find_by_field
 # 
from .other.manage_post_info import create_post_info # from .zenodo import zenodo_upload # from .report import include_footer diff --git a/scdata/utils/find.py b/scdata/utils/find.py new file mode 100644 index 00000000..73b3d10d --- /dev/null +++ b/scdata/utils/find.py @@ -0,0 +1,11 @@ +from scdata.utils import logger + +def find_by_field(models, value, field): + try: + item = next(model for _, model in enumerate(models) if model.__getattribute__(field) == value) + except StopIteration: + # logger.info(f'Column {field} or value {value} not in models') + pass + else: + return item + return None \ No newline at end of file diff --git a/scdata/utils/logs.py b/scdata/utils/logs.py deleted file mode 100644 index bef9d4e7..00000000 --- a/scdata/utils/logs.py +++ /dev/null @@ -1,31 +0,0 @@ -from os import walk -from os.path import join -import yaml -from scdata._config import config - -def get_tests_log(deep_description = False): - ''' - Gets the tests in the given dir, looking for test_description.yaml - ''' - - # Get available tests in the data folder structure - tests = dict() - - for root, dirs, files in walk(config.paths['processed']): - for file in files: - if file.endswith(".yaml"): - test_name = root.split('/')[-1] - if test_name.startswith('.'): continue - - tests[test_name] = dict() - tests[test_name]['path'] = root - - if deep_description == True: - filePath = join(root, file) - with open(filePath, 'r') as stream: - yamlFile = yaml.load(stream, Loader = yaml.FullLoader) - for key in yamlFile.keys(): - if key == 'devices': continue - tests[test_name][key] = yamlFile[key] - - return tests \ No newline at end of file From dff12c87e7e2de468beb4e7c70a9806dcc1fe657 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:01:14 +0200 Subject: [PATCH 29/72] Remove dedicated test load --- scdata/test/load/__init__.py | 1 - scdata/test/load/load.py | 217 ----------------------------------- 2 files changed, 218 deletions(-) delete mode 100644 scdata/test/load/__init__.py delete mode 100644 scdata/test/load/load.py diff --git a/scdata/test/load/__init__.py b/scdata/test/load/__init__.py deleted file mode 100644 index ad09d5c1..00000000 --- a/scdata/test/load/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .load import load \ No newline at end of file diff --git a/scdata/test/load/load.py b/scdata/test/load/load.py deleted file mode 100644 index fde04ac4..00000000 --- a/scdata/test/load/load.py +++ /dev/null @@ -1,217 +0,0 @@ -from scdata.utils import std_out, localise_date -from scdata.io import read_csv_file, export_csv_file -from scdata.device import Device -from os import makedirs -from os.path import join, exists -import yaml -from datetime import timedelta - -def load(self, options = dict()): - - ''' - Loads the test data and the different devices - - Parameters: - ----------- - options: dict() - - load_cached_api: bool - Default: config.data['load_cached_api'] - Load or not cached data from the API in previous test loads - - store_cached_api: bool - Default: config.data['store_cached_api'] - Cache or not newly downloaded API data for future test loads - - clean_na: String - Default: None - Clean NaN as pandas format. 
Possibilities: 'fill_na', 'drop_na' or None - - frequency: String (timedelta format: https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags) - Default: 1Min - Frequency to load or request data - - min_date: String or datetime - Default: None - Minimum data to load data from - - max_date: String or datetime - Default: None - Maximum date to load data to - - resample: Boolean - Default: True - Resample timestamp to the "frequency" resolution - Returns - ---------- - None - ''' - - # Load descriptor - std_out(f'Loading test {self.full_name}') - if not exists(self.path): - std_out('Test does not exist with that name. Have you already created it? Hint: test.create()', 'ERROR') - return - - with open(join(self.path, 'test_description.yaml'), 'r') as descriptor_file: - self.descriptor = yaml.load(descriptor_file, Loader = yaml.FullLoader) - - # Add devices - for key in self.descriptor['devices'].keys(): - self.devices[key] = Device(self.descriptor['devices'][key]['blueprint'], - self.descriptor['devices'][key]) - - # Set options - self.__set_options__(options) - - std_out (f'Using options for test: {self.options}') - - for key in self.devices.keys(): - - device = self.devices[key] - std_out('---------------------------') - std_out(f'Loading device {device.id}') - - min_date_device = localise_date(device.min_date, device.timezone) - max_date_device = localise_date(device.max_date, device.timezone) - - # If device comes from API, pre-check dates - if device.source == 'api': - - if device.timezone is None: - device.timezone = device.api_device.get_device_timezone() - min_date_device = localise_date(device.min_date, device.timezone) - max_date_device = localise_date(device.max_date, device.timezone) - - # Get last reading from API - if 'get_device_last_reading' in dir(device.api_device): - last_reading_api = localise_date(device.api_device.get_device_last_reading(), - device.timezone) - - if self.options['load_cached_api']: - std_out(f'Checking if we can load cached data') - if not device.load(options = self.options, - path = join(self.path, 'cached'), - convert_units = False): - - std_out(f'No valid cached data. 
Requesting device {device.id} to API', 'WARNING') - min_date_to_load = localise_date(device.options['min_date'], - device.timezone) - max_date_to_load = localise_date(device.options['max_date'], - device.timezone) - load_API = True - - else: - - std_out(f'Loaded cached files', 'SUCCESS') - std_out(f'Checking if new data is to be loaded') - - # Get last reading from cached - last_reading_cached = localise_date(device.readings.index[-1], device.timezone) - std_out(f'Last cached date {last_reading_cached}') - std_out(f'Last reading in API {last_reading_api}') - - # Check which dates to load - if max_date_device is not None: - std_out(f'Max date in test {max_date_device}') - # Check what where we need to load data from, if any - if last_reading_cached < max_date_device and last_reading_api > last_reading_cached + timedelta(hours=1): - load_API = True - combine_cache_API = True - min_date_to_load = last_reading_cached - max_date_to_load = min(max_date_device, last_reading_api) - std_out('Loading new data from API') - else: - load_API = False - std_out('No need to load new data from API') - else: - # If no test data specified, check the last reading in the API - if last_reading_api > (last_reading_cached + timedelta(hours=self.options['cached_data_margin'])): - load_API = True - combine_cache_API = True - min_date_to_load = last_reading_cached - max_date_to_load = last_reading_api - std_out('Loading new data from API') - else: - load_API = False - std_out('No need to load new data from API') - else: - min_date_to_load = min_date_device - max_date_to_load = max_date_device - load_API = True - else: - if self.options['load_cached_api']: - std_out('Cannot load cached data without last reading available', 'WARNING') - min_date_to_load = min_date_device - max_date_to_load = max_date_device - last_reading_api = None - load_API = True - - # Load data from API if necessary - if load_API: - std_out('Downloading device from API') - - if last_reading_api is not None: - - # Check which min date to load - if min_date_to_load is not None: - std_out('First reading requested: {}'.format(min_date_to_load)) - if min_date_to_load > last_reading_api: - std_out('Discarding device. Min date requested is after last reading', 'WARNING') - continue - else: - std_out('Requesting all available data', 'WARNING') - - # Check which max date to load - if max_date_to_load is not None: - std_out('Last reading requested: {}'.format(max_date_to_load)) - if max_date_to_load > last_reading_api: - # Not possible to load what has not been stored - std_out('Requesting up to max available date in the API {}'.format(last_reading_api)) - max_date_to_load = last_reading_api - else: - # Just put None and we will handle it later - std_out('Requesting up to max available date in the API {}'.format(last_reading_api)) - max_date_to_load = last_reading_api - - # else: - # std_out('Discarding device. 
No max date available', 'WARNING') - # continue - - device_options = { - 'clean_na': self.options['clean_na'], - 'min_date': min_date_to_load, - 'max_date': max_date_to_load - } - - if 'frequency' in self.options: - device_options['frequency'] = self.options['frequency'] - - if 'resample' in self.options: - device_options['resample'] = self.options['resample'] - - device.load(options = device_options) - - elif device.source == 'csv': - - device.load(options = self.options, path = self.path, follow_defaults = True) - - if self.options['store_cached_api'] and device.loaded and device.source == 'api' and load_API: - - std_out(f'Caching files for {device.id}') - - cached_file_path = join(self.path, 'cached') - if not exists(cached_file_path): - std_out('Creating path for exporting cached data') - makedirs(cached_file_path) - - if export_csv_file(cached_file_path, device.id, device.readings, forced_overwrite = True): - std_out('Devices cached', 'SUCCESS') - - if device.loaded: std_out(f'Device {device.id} has been loaded', 'SUCCESS') - else: std_out(f'Could not load device {device.id}. Skipping', 'WARNING') - - # TODO IMPORTANT! - self.__update_descriptor__() - std_out('Test load done', 'SUCCESS') - self.loaded = True \ No newline at end of file From 3cb27ec4f07d9f895659adc6cf6192601b9d5c10 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:01:43 +0200 Subject: [PATCH 30/72] Missing logger transition --- scdata/test/export/to_file.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scdata/test/export/to_file.py b/scdata/test/export/to_file.py index 808ef605..a21ef10b 100755 --- a/scdata/test/export/to_file.py +++ b/scdata/test/export/to_file.py @@ -2,7 +2,7 @@ from os.path import join, dirname, exists from os import makedirs -from scdata.utils import std_out +from scdata.utils import logger import flask from re import sub @@ -20,7 +20,7 @@ def to_csv(self, path = None, forced_overwrite = False): Returns ------- True if export successul - """ + """ export_ok = True if path is None: epath = join(self.path, 'processed') @@ -30,8 +30,8 @@ def to_csv(self, path = None, forced_overwrite = False): for device in self.devices.keys(): export_ok &= self.devices[device].export(epath, forced_overwrite = forced_overwrite) - if export_ok: std_out(f'Test {self.full_name} exported successfully', 'SUCCESS') - else: std_out(f'Test {self.full_name} not exported successfully', 'ERROR') + if export_ok: logger.info(f'Test {self.full_name} exported successfully') + else: logger.error(f'Test {self.full_name} not exported successfully') return export_ok @@ -65,7 +65,7 @@ def to_html(self, title = 'Your title here', template = 'sc_template.html', path Whether to include a header or not Returns ---------- - rendered: + rendered: flask rendered template ''' @@ -74,12 +74,12 @@ def to_html(self, title = 'Your title here', template = 'sc_template.html', path if path is None: path = join(self.path, 'export') - if not exists(path): - std_out('Creating folder for test export') + if not exists(path): + logger.info('Creating folder for test export') makedirs(path) filename = join(path, f'{self.full_name}.html') - + docname = sub('.','_', self.full_name) app = flask.Flask(docname, template_folder = template_folder) @@ -97,7 +97,7 @@ def to_html(self, title = 'Your title here', template = 'sc_template.html', path with open(filename, 'w') as handle: handle.write(rendered) - - std_out (f'File saved to: {filename}', 'SUCCESS') + + logger.info (f'File saved to: {filename}') return 
filename, rendered

From ce0507fce7d59757733e5ccf79da841348089e91 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 19:02:21 +0200
Subject: [PATCH 31/72] Add test checks and dispersion tools

---
 scdata/test/checks/__init__.py       |   1 +
 scdata/test/checks/checks.py         | 133 +++++++++++++++++++++++++++
 scdata/test/dispersion/__init__.py   |   1 +
 scdata/test/dispersion/dispersion.py | 112 ++++++++++++++++++++++
 4 files changed, 247 insertions(+)
 create mode 100644 scdata/test/checks/__init__.py
 create mode 100644 scdata/test/checks/checks.py
 create mode 100644 scdata/test/dispersion/__init__.py
 create mode 100644 scdata/test/dispersion/dispersion.py

diff --git a/scdata/test/checks/__init__.py b/scdata/test/checks/__init__.py
new file mode 100644
index 00000000..f5586d9d
--- /dev/null
+++ b/scdata/test/checks/__init__.py
@@ -0,0 +1 @@
+from .checks import get_common_channels, gaps_check
\ No newline at end of file

diff --git a/scdata/test/checks/checks.py b/scdata/test/checks/checks.py
new file mode 100644
index 00000000..4ec57556
--- /dev/null
+++ b/scdata/test/checks/checks.py
@@ -0,0 +1,133 @@
+from scdata.utils import logger
+import matplotlib.pyplot as plt
+import missingno as msno
+from pandas import to_datetime, DataFrame
+from scdata.test.plot.plot_tools import prepare_data
+from scdata._config import config
+from scdata.utils.dictmerge import dict_fmerge
+
+def gaps_check(self, devices = None, channels = None, groupby = 'channel', **kwargs):
+    if config.framework == 'jupyterlab': plt.ioff();
+    plt.clf();
+
+    if 'formatting' not in kwargs:
+        logger.info('Using default formatting')
+        formatting = config._missingno_def_fmt
+    else:
+        formatting = dict_fmerge(config._missingno_def_fmt, kwargs['formatting'])
+
+    # Get list of devices
+    if devices is None:
+        _devices = list(self.devices.keys())
+    else:
+        _devices = devices
+
+    if channels is None:
+        logger.error('Need some channels to check gaps for')
+        return
+
+    if groupby == 'device':
+
+        for device in _devices:
+            if device not in self.devices:
+                logger.warning('Device not found in test')
+                continue
+            msno.matrix(self.devices[device].readings)
+
+    elif groupby == 'channel':
+
+        for channel in channels:
+            traces = {"1": {"devices": _devices, "channel": channel, "subplot": 1}}
+            options = config._plot_def_opt
+            df, _ = prepare_data(self, traces, options)
+            fig, ax = plt.subplots(1, len(_devices), figsize=(formatting['width'], formatting['height']))
+            for device in _devices:
+                if device not in self.devices:
+                    logger.warning('Device not found in test')
+                    continue
+                msno.matrix(df = DataFrame(df[f'{channel}_{device}']), sparkline=False, ax = ax[_devices.index(device)], fontsize=formatting['fontsize'])
+                ax[_devices.index(device)].set_yticks([i for i in range(len(df))], [i for i in df.index.values])
+            plt.show()
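+# Example usage of gaps_check, assuming it is bound to a Test instance like
+# the other test helpers (the channel names and formatting below are made up):
+#
+#   test.gaps_check(channels = ['TEMP', 'PM_25'], groupby = 'channel')
+#   test.gaps_check(channels = ['TEMP'], groupby = 'device',
+#                   formatting = {'fontsize': 8})
+#
+# groupby = 'channel' renders one missingno matrix per channel, with one
+# subplot per device; groupby = 'device' renders one matrix per device.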
+
+def get_common_channels(self, devices = None, ignore_missing_channels = False, pop_zero_readings_devices = False, detailed = False, verbose = True):
+    '''
+    Convenience method to get the common channels of the devices in the test
+    Params:
+        devices: list
+            None
+            List of devices to get common channels from. Passing None means 'all'
+        ignore_missing_channels: bool
+            False
+            In case there is a device with fewer channels, ignore the missing channels and keep going
+        pop_zero_readings_devices: bool
+            False
+            Remove devices from test that have no readings
+        verbose: bool
+            True
+            Print extra info
+    Returns:
+        List containing the common channels to all devices
+    '''
+
+    # Get list of devices
+    if devices is None:
+        list_devices = list(self.devices.keys())
+        return_all = True
+    else:
+        list_devices = devices
+        return_all = False
+
+    # Init list of common channels. Get just the first one
+    list_channels = self.devices[list_devices[0]].readings.columns
+
+    # Extract list of common channels
+    len_channels = len(list_channels)
+    show_warning = False
+    channels_devices = {}
+
+    for device in list_devices:
+
+        if ignore_missing_channels:
+            # We don't reduce the list in case the new list is smaller
+            list_channels = list(set(list_channels) | set(self.devices[device].readings.columns))
+        else:
+            # We reduce it
+            list_channels = list(set(list_channels) & set(self.devices[device].readings.columns))
+
+        channels_devices[device] = len(self.devices[device].readings.columns)
+        logger.info(f'Device {device}')
+        logger.info(f'Min reading at {self.devices[device].readings.index[0]}')
+        logger.info(f'Max reading at {self.devices[device].readings.index[-1]}')
+        logger.info(f'Number of dataframe points {len(self.devices[device].readings.index)}')
+        if detailed:
+            for column in list_channels:
+                logger.info('\tColumn {}'.format(column))
+                nas = self.devices[device].readings[column].isna()
+                logger.info('\tNumber of nas {}'.format(nas.sum()))
+
+        ## Eliminate devices with no points
+        if (len(self.devices[device].readings.index) == 0):
+            logger.warning(f'Device {device} has insufficient data points')
+            if pop_zero_readings_devices: self.devices.pop(device)
+        # Check the number of channels
+        elif len_channels != len(self.devices[device].readings.columns):
+            logger.warning(f"Device {device} has {len(self.devices[device].readings.columns)} channels. Current common channel length is {len_channels}")
+            len_channels = len(list_channels)
+            show_warning = True
+            if ignore_missing_channels: logger.warning("Ignoring missing channels")
+        logger.info('---------')
+
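+    # Worked example of the two modes above, with made-up channel sets:
+    #   device A reads {'TEMP', 'HUM', 'PM_25'}; device B reads {'TEMP', 'HUM'}
+    #   ignore_missing_channels = False -> intersection: {'TEMP', 'HUM'}
+    #   ignore_missing_channels = True  -> union: {'TEMP', 'HUM', 'PM_25'}
+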
+    if return_all:
+
+        self.common_channels = list_channels
+
+        logger.info(f'Final list of channels:\n {self.common_channels}')
+        if show_warning:
+            logger.warning('Some devices show fewer sensors')
+            print(channels_devices)
+
+        return self.common_channels
+
+    else:
+
+        return list_channels

diff --git a/scdata/test/dispersion/__init__.py b/scdata/test/dispersion/__init__.py
new file mode 100644
index 00000000..f0891b8e
--- /dev/null
+++ b/scdata/test/dispersion/__init__.py
@@ -0,0 +1 @@
+from .dispersion import dispersion_analysis, dispersion_summary
\ No newline at end of file

diff --git a/scdata/test/dispersion/dispersion.py b/scdata/test/dispersion/dispersion.py
new file mode 100644
index 00000000..d2481692
--- /dev/null
+++ b/scdata/test/dispersion/dispersion.py
@@ -0,0 +1,112 @@
+from scdata.utils import logger, localise_date
+from pandas import DataFrame
+from scdata._config import config
+
+def dispersion_analysis(self, devices = None, min_date = None, max_date = None, timezone = 'Europe/Madrid', smooth_window = 5):
+    '''
+    Creates channels on a new dataframe for each device/channel combination, and computes
+    the point-by-point average and standard deviation across devices
+
+    Parameters:
+    -----------
+        devices: list
+            Default: None
+            If list of devices is None, then it will use all devices in self.devices
+        min_date: String
+            Default: None
+            Minimum date from which to perform the analysis
+
+        max_date: String
+            Default: None
+            Maximum date up to which to perform the analysis
+
+        timezone: String
+            Default: 'Europe/Madrid'
+            Timezone in which to localise the data
+
+        smooth_window: int
+            Default: 5
+            If not None, performs smoothing of the channels with rolling average.
+
+    Returns:
+    ---------
+        self.dispersion_summary if devices is None, otherwise a dict with the
+        average dispersion (STD) per common channel of the given devices
+    '''
+    dispersion_df = DataFrame()
+
+    # Get common channels for this group
+    if devices is not None:
+        common_ch = self.get_common_channels(devices = devices)
+        _devices = devices
+    else:
+        common_ch = self.get_common_channels()
+        _devices = self.devices
+
+    # Localise dates
+    min_date = localise_date(min_date, timezone)
+    max_date = localise_date(max_date, timezone)
+
+    # Calculate the dispersion for the sensors present in the dataset
+    warning = False
+
+    for channel in common_ch:
+        columns = list()
+
+        if channel in config._dispersion['ignore_channels']: continue
+
+        for device in _devices:
+            if channel in self.devices[device].readings.columns and len(self.devices[device].readings.loc[:,channel]) > 0:
+                # Important to resample and bfill for unmatching measures
+                if smooth_window is not None:
+                    # channel_new = self.devices[device].readings[channel].resample('1Min').bfill().rolling(window=smooth_window).mean()
+                    channel_new = self.devices[device].readings[channel].bfill().rolling(window=smooth_window).mean()
+                    dispersion_df[channel + '-' + device] = channel_new[channel_new > 0]
+                else:
+                    dispersion_df[channel + '-' + device] = self.devices[device].readings[channel].resample('1Min').bfill()
+
+                columns.append(channel + '-' + device)
+            else:
+                logger.warning(f'Device {device} does not contain {channel}
') + warning = True + + dispersion_df.index = localise_date(dispersion_df.index, timezone) + + # Trim dataset to min and max dates (normally these tests are carried out with _minutes_ of differences) + if min_date is not None: dispersion_df = dispersion_df[dispersion_df.index > min_date] + if max_date is not None: dispersion_df = dispersion_df[dispersion_df.index < max_date] + + # Calculate Metrics + dispersion_df[channel + '_AVG'] = dispersion_df.loc[:,columns].mean(skipna=True, axis = 1) + dispersion_df[channel + '_STD'] = dispersion_df.loc[:,columns].std(skipna=True, axis = 1) + + if not warning: + logger.info(f'All devices have the provided channels list recorded') + else: + logger.warning(f'Missing channels, review data') + + if devices is None: + self.dispersion_df = dispersion_df + return self.dispersion_summary + + group_dispersion_summary = dict() + + for channel in common_ch: + if channel in config._dispersion['ignore_channels']: continue + # Calculate + group_dispersion_summary[channel] = dispersion_df[channel + '_STD'].mean() + + return group_dispersion_summary + +@property +def dispersion_summary(self): + self._dispersion_summary = dict() + + if self.dispersion_df is None: + logger.error('Perform dispersion analysis first!') + return None + for channel in self.common_channels: + if channel in config._dispersion['ignore_channels']: continue + # Calculate + self._dispersion_summary[channel] = self.dispersion_df[channel + '_STD'].mean() + + return self._dispersion_summary From bc3036bb9e5d6a282866d8c2c0a438b3e1cee271 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:03:39 +0200 Subject: [PATCH 32/72] Mostly remove api handlers --- scdata/io/device_api.py | 3536 +++++++++++++++++++-------------------- 1 file changed, 1768 insertions(+), 1768 deletions(-) diff --git a/scdata/io/device_api.py b/scdata/io/device_api.py index f79086d5..94671ce9 100644 --- a/scdata/io/device_api.py +++ b/scdata/io/device_api.py @@ -10,7 +10,7 @@ from geopy.distance import distance from scdata._config import config -from scdata.utils import std_out, localise_date, clean, get_elevation, url_checker, process_headers +from scdata.utils import logger, localise_date, clean, get_elevation, url_checker, process_headers # from tzwhere import tzwhere from timezonefinder import TimezoneFinder from datetime import date, datetime @@ -54,1811 +54,1811 @@ def default(self, obj): return obj.tolist() return super(NpEncoder, self).default(obj) -class ScApiDevice: - - API_BASE_URL='https://api.smartcitizen.me/v0/devices/' - - def __init__ (self, did): - - self.id = did # the number after https://smartcitizen.me/kits/###### - self.kit_id = None # the number that defines the type of blueprint - self.mac = None - self.last_reading_at = None - self.added_at = None - self.timezone = None - self.lat = None - self.long = None - self.alt = None - self.data = None - self.sensors = None - self.devicejson = None - self.postprocessing = None - self._url = f'https://smartcitizen.me/kits/{self.id}' - self._api_url = f'{self.API_BASE_URL}{self.id}' - - @property - def url(self): - return self._url - - @property - def api_url(self): - return self._api_url - - @staticmethod - # def new_device(name, kit_id = 26, location = None, exposure = 'indoor', user_tags = 'Lab, Research, Experimental', dry_run = False): - def new_device(name, location = {}, dry_run = False, **kwargs): - ''' - Creates a new device in the Smart Citizen Platform provided a name - Parameters - ---------- - name: string - Device name - location: dict, 
optional - None - location = { - 'longitude': longitude (double) – sensor east-west position, - 'latitude': latitude (double) – sensor north-south position, - 'altitude': altitude (double) – sensor height above sea level - } - dry_run: boolean - False - Post the payload to the API or just return it - **kwargs - ------ - kit_id: int, optional - 26 (SCK 2.1) - Kit ID - related to blueprint - exposure: string, optional - 'indoor' - Type of exposure ('indoor', 'outdoor') - user_tags: string - 'Lab, Research, Experimental' - User tags, comma separated - ----- - Returns - ------- - If dry_run, prints out a dict containing the payload and - returns False - If not, either False in case of error or a - dictionary containing: - id (int) – sensor identifier - message (string) – HTTP status text - http-status-code (int) – HTTP status code - ''' - - API_BASE_URL = 'https://api.smartcitizen.me/v0/devices' - - if 'SC_ADMIN_BEARER' not in environ: - std_out('Cannot post without Auth Bearer', 'ERROR') - return - - headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER'], 'Content-type': 'application/json'} - - # Set defaults - if 'kit_id' not in kwargs: - kit_id = 26 - else: kit_id = kwargs['kit_id'] - - if 'exposure' not in kwargs: - exposure = 'indoor' - else: exposure = kwargs['exposure'] - - if 'user_tags' not in kwargs: - user_tags = 'Lab, Research, Experimental' - else: user_tags = kwargs['user_tags'] - - payload = {} - try: - payload['name'] = name - except: - std_out('Your device needs a name!', 'ERROR') - sys.exit() - - payload['device_token'] = binascii.b2a_hex(urandom(3)).decode('utf-8') - payload['description'] = '' - payload['kit_id'] = kit_id - payload['latitude'] = location['latitude'] - payload['longitude'] = location['longitude'] - payload['exposure'] = exposure - payload['user_tags'] = user_tags - - if dry_run: - std_out(f'Dry run request to: {API_BASE_URL}sensors/configure') - print(dumps(payload, indent = 2)) - return False - - response = post(API_BASE_URL, data=dumps(payload), headers=headers) - - if response.status_code == 200 or response.status_code == 201: - if 'id' in response.json(): - platform_id = str(response.json()['id']) - platform_url = "https://smartcitizen.me/kits/" + platform_id - std_out(f'Device created with: \n{platform_url}', 'SUCCESS') - return response.json() - else: - std_out('Response does not contain id field') - - std_out(f'Error while creating new device, platform returned {response.status_code}', 'ERROR') - return False - - @staticmethod - def global_search(value = None, full = False): - """ - Gets devices from Smart Citizen API based on basic search query values, - searching both Users and Devices at the same time. - Global search documentation: https://developer.smartcitizen.me/#global-search - Parameters - ---------- - value: string - None - Query to fit - For null, not_null values, use 'null' or 'not_null' - full: bool - False - Returns a list with if False, or the whole dataframe if True - Returns - ------- - A list of kit IDs that comply with the requirements, or the full df, depending on full. 
- """ - - API_BASE_URL = "https://api.smartcitizen.me/v0/search?q=" - - # Value check - if value is None: std_out(f'Value needs a value, {value} supplied', 'ERROR'); return None - - url = API_BASE_URL + f'{value}' - - df = DataFrame() - isn = True - while isn: - try: - r = get(url) - # If status code OK, retrieve data - if r.status_code == 200 or r.status_code == 201: - h = process_headers(r.headers) - df = df.combine_first(DataFrame(r.json()).set_index('id')) - else: - std_out('API reported {}'.format(r.status_code), 'ERROR') - except: - std_out('Failed request. Probably no connection', 'ERROR') - pass - - if 'next' in h: - if h['next'] == url: isn = False - elif h['next'] != url: url = h['next'] - else: - isn = False - - if full: return df - else: return list(df.index) - - @staticmethod - def search_by_query(key = '', value = None, full = False): - """ - Gets devices from Smart Citizen API based on ransack parameters - Basic query documentation: https://developer.smartcitizen.me/#basic-searching - Parameters - ---------- - key: string - '' - Query key according to the basic query documentation. Some (not all) parameters are: - ['id', 'owner_id', 'name', 'description', 'mac_address', 'created_at', - 'updated_at', 'kit_id', 'geohash', 'last_recorded_at', 'uuid', 'state', - 'postprocessing_id', 'hardware_info'] - value: string - None - Query to fit - For null, not_null values, use 'null' or 'not_null' - full: bool - False - Returns a list with if False, or the whole dataframe if True - Returns - ------- - A list of kit IDs that comply with the requirements, or the full df, depending on full. - """ - - API_BASE_URL = "https://api.smartcitizen.me/v0/devices/" - - # Value check - if value is None: std_out(f'Value needs a value, {value} supplied', 'ERROR'); return None - - if value == 'null' or value == 'not_null': - url = API_BASE_URL + f'?q[{key}_{value}]=1' - else: - url = API_BASE_URL + f'?q[{key}]={value}' - - df = DataFrame() - isn = True - while isn: - try: - r = get(url) - # If status code OK, retrieve data - if r.status_code == 200 or r.status_code == 201: - h = process_headers(r.headers) - df = df.combine_first(DataFrame(r.json()).set_index('id')) - else: - std_out('API reported {}'.format(r.status_code), 'ERROR') - except: - std_out('Failed request. Probably no connection', 'ERROR') - pass - - if 'next' in h: - if h['next'] == url: isn = False - elif h['next'] != url: url = h['next'] - else: - isn = False - - if full: return df - else: return list(df.index) - - @staticmethod - def get_world_map(min_date = None, max_date = None, city = None, within = None, tags = None, tag_method = 'any', full = False): - """ - Gets devices from Smart Citizen API with certain requirements - Parameters - ---------- - min_date: string, datetime-like object, optional - None - Minimum date to filter out the devices. Device started posted before min_date - max_date: string, datetime-like object, optional - None - Maximum date to filter out the devices. Device posted after max_date - city: string, optional - Empty string - City - within: tuple - Empty tuple - Gets the devices within a circle center on lat, long with a radius_meters - within = tuple(lat, long, radius_meters) - tags: list of strings - None - Tags for the device (system or user). Default system wide are: indoor, outdoor, online, and offline - tag_method: string - 'any' - 'any' or 'all'. 
Checks if 'all' the tags are to be included in the tags or it could be any - full: bool - False - Returns a list with if False, or the whole dataframe if True - Returns - ------- - A list of kit IDs that comply with the requirements, or the full df, depending on full. - If no requirements are set, returns all of them - """ - - def is_within_circle(x, within): - if isnan(x['latitude']): return False - if isnan(x['longitude']): return False - - return distance((within[0], within[1]), (x['latitude'], x['longitude'])).m df['added_at'])] - if max_date is not None: df=df[(max_date < df['last_reading_at'])] - - # Location - if city is not None: df=df[(df['city']==city)] - if within is not None: - - df['within'] = df.apply(lambda x: is_within_circle(x, within), axis=1) - df=df[(df['within']==True)] - - # Tags - if tags is not None: - if tag_method == 'any': - df['has_tags'] = df.apply(lambda x: any(tag in x['system_tags']+x['user_tags'] for tag in tags), axis=1) - elif tag_method == 'all': - df['has_tags'] = df.apply(lambda x: all(tag in x['system_tags']+x['user_tags'] for tag in tags), axis=1) - df=df[(df['has_tags']==True)] - - if full: return df - else: return list(df.index) - - def get_mac(self, update = False): - if self.mac is None or update: - std_out(f'Requesting MAC from API for device {self.id}') - # Get device - try: - deviceR = get(self.API_BASE_URL + '{}/'.format(self.id)) - - # If status code OK, retrieve data - if deviceR.status_code == 200 or deviceR.status_code == 201: - if 'hardware_info' in deviceR.json().keys(): self.mac = deviceR.json()['hardware_info']['mac'] - std_out ('Device {} is has this MAC {}'.format(self.id, self.mac)) - else: - std_out('API reported {}'.format(deviceR.status_code), 'ERROR') - except: - std_out('Failed request. Probably no connection', 'ERROR') - pass - - return self.mac - - def get_device_json(self, update = False): - if self.devicejson is None or update: - try: - deviceR = get(self.API_BASE_URL + '{}/'.format(self.id)) - if deviceR.status_code == 429: - std_out('API reported {}. Retrying once'.format(deviceR.status_code), - 'WARNING') - sleep(30) - deviceR = get(self.API_BASE_URL + '{}/'.format(self.id)) - - if deviceR.status_code == 200 or deviceR.status_code == 201: - self.devicejson = deviceR.json() - else: - std_out('API reported {}'.format(deviceR.status_code), 'ERROR') - except: - std_out('Failed request. 
Probably no connection', 'ERROR') - pass - return self.devicejson - - def get_device_description(self, update = False): - if self.get_device_json(update) is not None: - return self.get_device_json()['kit']['description'] - return None - - def get_kit_ID(self, update = False): - - if self.kit_id is None or update: - if self.get_device_json(update) is not None: - self.kit_id = self.devicejson['kit']['id'] - - return self.kit_id - - def post_kit_ID(self): - ''' - Posts kit id to platform - ''' +# class ScApiDevice: + +# API_BASE_URL='https://api.smartcitizen.me/v0/devices/' + +# def __init__ (self, did): + +# self.id = did # the number after https://smartcitizen.me/kits/###### +# self.kit_id = None # the number that defines the type of blueprint +# self.mac = None +# self.last_reading_at = None +# self.added_at = None +# self.timezone = None +# self.lat = None +# self.long = None +# self.alt = None +# self.data = None +# self.sensors = None +# self.devicejson = None +# self.postprocessing = None +# self._url = f'https://smartcitizen.me/kits/{self.id}' +# self._api_url = f'{self.API_BASE_URL}{self.id}' + +# @property +# def url(self): +# return self._url + +# @property +# def api_url(self): +# return self._api_url + +# @staticmethod +# # def new_device(name, kit_id = 26, location = None, exposure = 'indoor', user_tags = 'Lab, Research, Experimental', dry_run = False): +# def new_device(name, location = {}, dry_run = False, **kwargs): +# ''' +# Creates a new device in the Smart Citizen Platform provided a name +# Parameters +# ---------- +# name: string +# Device name +# location: dict, optional +# None +# location = { +# 'longitude': longitude (double) – sensor east-west position, +# 'latitude': latitude (double) – sensor north-south position, +# 'altitude': altitude (double) – sensor height above sea level +# } +# dry_run: boolean +# False +# Post the payload to the API or just return it +# **kwargs +# ------ +# kit_id: int, optional +# 26 (SCK 2.1) +# Kit ID - related to blueprint +# exposure: string, optional +# 'indoor' +# Type of exposure ('indoor', 'outdoor') +# user_tags: string +# 'Lab, Research, Experimental' +# User tags, comma separated +# ----- +# Returns +# ------- +# If dry_run, prints out a dict containing the payload and +# returns False +# If not, either False in case of error or a +# dictionary containing: +# id (int) – sensor identifier +# message (string) – HTTP status text +# http-status-code (int) – HTTP status code +# ''' + +# API_BASE_URL = 'https://api.smartcitizen.me/v0/devices' + +# if 'SC_ADMIN_BEARER' not in environ: +# std_out('Cannot post without Auth Bearer', 'ERROR') +# return + +# headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER'], 'Content-type': 'application/json'} + +# # Set defaults +# if 'kit_id' not in kwargs: +# kit_id = 26 +# else: kit_id = kwargs['kit_id'] + +# if 'exposure' not in kwargs: +# exposure = 'indoor' +# else: exposure = kwargs['exposure'] + +# if 'user_tags' not in kwargs: +# user_tags = 'Lab, Research, Experimental' +# else: user_tags = kwargs['user_tags'] + +# payload = {} +# try: +# payload['name'] = name +# except: +# std_out('Your device needs a name!', 'ERROR') +# sys.exit() + +# payload['device_token'] = binascii.b2a_hex(urandom(3)).decode('utf-8') +# payload['description'] = '' +# payload['kit_id'] = kit_id +# payload['latitude'] = location['latitude'] +# payload['longitude'] = location['longitude'] +# payload['exposure'] = exposure +# payload['user_tags'] = user_tags + +# if dry_run: +# std_out(f'Dry run request to: 
{API_BASE_URL}sensors/configure') +# print(dumps(payload, indent = 2)) +# return False + +# response = post(API_BASE_URL, data=dumps(payload), headers=headers) + +# if response.status_code == 200 or response.status_code == 201: +# if 'id' in response.json(): +# platform_id = str(response.json()['id']) +# platform_url = "https://smartcitizen.me/kits/" + platform_id +# std_out(f'Device created with: \n{platform_url}', 'SUCCESS') +# return response.json() +# else: +# std_out('Response does not contain id field') + +# std_out(f'Error while creating new device, platform returned {response.status_code}', 'ERROR') +# return False + +# @staticmethod +# def global_search(value = None, full = False): +# """ +# Gets devices from Smart Citizen API based on basic search query values, +# searching both Users and Devices at the same time. +# Global search documentation: https://developer.smartcitizen.me/#global-search +# Parameters +# ---------- +# value: string +# None +# Query to fit +# For null, not_null values, use 'null' or 'not_null' +# full: bool +# False +# Returns a list with if False, or the whole dataframe if True +# Returns +# ------- +# A list of kit IDs that comply with the requirements, or the full df, depending on full. +# """ + +# API_BASE_URL = "https://api.smartcitizen.me/v0/search?q=" + +# # Value check +# if value is None: std_out(f'Value needs a value, {value} supplied', 'ERROR'); return None + +# url = API_BASE_URL + f'{value}' + +# df = DataFrame() +# isn = True +# while isn: +# try: +# r = get(url) +# # If status code OK, retrieve data +# if r.status_code == 200 or r.status_code == 201: +# h = process_headers(r.headers) +# df = df.combine_first(DataFrame(r.json()).set_index('id')) +# else: +# std_out('API reported {}'.format(r.status_code), 'ERROR') +# except: +# std_out('Failed request. Probably no connection', 'ERROR') +# pass + +# if 'next' in h: +# if h['next'] == url: isn = False +# elif h['next'] != url: url = h['next'] +# else: +# isn = False + +# if full: return df +# else: return list(df.index) + +# @staticmethod +# def search_by_query(key = '', value = None, full = False): +# """ +# Gets devices from Smart Citizen API based on ransack parameters +# Basic query documentation: https://developer.smartcitizen.me/#basic-searching +# Parameters +# ---------- +# key: string +# '' +# Query key according to the basic query documentation. Some (not all) parameters are: +# ['id', 'owner_id', 'name', 'description', 'mac_address', 'created_at', +# 'updated_at', 'kit_id', 'geohash', 'last_recorded_at', 'uuid', 'state', +# 'postprocessing_id', 'hardware_info'] +# value: string +# None +# Query to fit +# For null, not_null values, use 'null' or 'not_null' +# full: bool +# False +# Returns a list with if False, or the whole dataframe if True +# Returns +# ------- +# A list of kit IDs that comply with the requirements, or the full df, depending on full. 
+# """ + +# API_BASE_URL = "https://api.smartcitizen.me/v0/devices/" + +# # Value check +# if value is None: std_out(f'Value needs a value, {value} supplied', 'ERROR'); return None + +# if value == 'null' or value == 'not_null': +# url = API_BASE_URL + f'?q[{key}_{value}]=1' +# else: +# url = API_BASE_URL + f'?q[{key}]={value}' + +# df = DataFrame() +# isn = True +# while isn: +# try: +# r = get(url) +# # If status code OK, retrieve data +# if r.status_code == 200 or r.status_code == 201: +# h = process_headers(r.headers) +# df = df.combine_first(DataFrame(r.json()).set_index('id')) +# else: +# std_out('API reported {}'.format(r.status_code), 'ERROR') +# except: +# std_out('Failed request. Probably no connection', 'ERROR') +# pass + +# if 'next' in h: +# if h['next'] == url: isn = False +# elif h['next'] != url: url = h['next'] +# else: +# isn = False + +# if full: return df +# else: return list(df.index) + +# @staticmethod +# def get_world_map(min_date = None, max_date = None, city = None, within = None, tags = None, tag_method = 'any', full = False): +# """ +# Gets devices from Smart Citizen API with certain requirements +# Parameters +# ---------- +# min_date: string, datetime-like object, optional +# None +# Minimum date to filter out the devices. Device started posted before min_date +# max_date: string, datetime-like object, optional +# None +# Maximum date to filter out the devices. Device posted after max_date +# city: string, optional +# Empty string +# City +# within: tuple +# Empty tuple +# Gets the devices within a circle center on lat, long with a radius_meters +# within = tuple(lat, long, radius_meters) +# tags: list of strings +# None +# Tags for the device (system or user). Default system wide are: indoor, outdoor, online, and offline +# tag_method: string +# 'any' +# 'any' or 'all'. Checks if 'all' the tags are to be included in the tags or it could be any +# full: bool +# False +# Returns a list with if False, or the whole dataframe if True +# Returns +# ------- +# A list of kit IDs that comply with the requirements, or the full df, depending on full. 
+# If no requirements are set, returns all of them +# """ + +# def is_within_circle(x, within): +# if isnan(x['latitude']): return False +# if isnan(x['longitude']): return False + +# return distance((within[0], within[1]), (x['latitude'], x['longitude'])).m df['added_at'])] +# if max_date is not None: df=df[(max_date < df['last_reading_at'])] + +# # Location +# if city is not None: df=df[(df['city']==city)] +# if within is not None: + +# df['within'] = df.apply(lambda x: is_within_circle(x, within), axis=1) +# df=df[(df['within']==True)] + +# # Tags +# if tags is not None: +# if tag_method == 'any': +# df['has_tags'] = df.apply(lambda x: any(tag in x['system_tags']+x['user_tags'] for tag in tags), axis=1) +# elif tag_method == 'all': +# df['has_tags'] = df.apply(lambda x: all(tag in x['system_tags']+x['user_tags'] for tag in tags), axis=1) +# df=df[(df['has_tags']==True)] + +# if full: return df +# else: return list(df.index) + +# def get_mac(self, update = False): +# if self.mac is None or update: +# std_out(f'Requesting MAC from API for device {self.id}') +# # Get device +# try: +# deviceR = get(self.API_BASE_URL + '{}/'.format(self.id)) + +# # If status code OK, retrieve data +# if deviceR.status_code == 200 or deviceR.status_code == 201: +# if 'hardware_info' in deviceR.json().keys(): self.mac = deviceR.json()['hardware_info']['mac'] +# std_out ('Device {} is has this MAC {}'.format(self.id, self.mac)) +# else: +# std_out('API reported {}'.format(deviceR.status_code), 'ERROR') +# except: +# std_out('Failed request. Probably no connection', 'ERROR') +# pass + +# return self.mac + +# def get_device_json(self, update = False): +# if self.devicejson is None or update: +# try: +# deviceR = get(self.API_BASE_URL + '{}/'.format(self.id)) +# if deviceR.status_code == 429: +# std_out('API reported {}. Retrying once'.format(deviceR.status_code), +# 'WARNING') +# sleep(30) +# deviceR = get(self.API_BASE_URL + '{}/'.format(self.id)) + +# if deviceR.status_code == 200 or deviceR.status_code == 201: +# self.devicejson = deviceR.json() +# else: +# std_out('API reported {}'.format(deviceR.status_code), 'ERROR') +# except: +# std_out('Failed request. 
Probably no connection', 'ERROR') +# pass +# return self.devicejson + +# def get_device_description(self, update = False): +# if self.get_device_json(update) is not None: +# return self.get_device_json()['kit']['description'] +# return None + +# def get_kit_ID(self, update = False): + +# if self.kit_id is None or update: +# if self.get_device_json(update) is not None: +# self.kit_id = self.devicejson['kit']['id'] + +# return self.kit_id + +# def post_kit_ID(self): +# ''' +# Posts kit id to platform +# ''' - if 'SC_ADMIN_BEARER' not in environ: - std_out('Cannot post without Auth Admin Bearer', 'ERROR') - return +# if 'SC_ADMIN_BEARER' not in environ: +# std_out('Cannot post without Auth Admin Bearer', 'ERROR') +# return - headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER'], 'Content-type': 'application/json'} +# headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER'], 'Content-type': 'application/json'} - if self.kit_id is not None: +# if self.kit_id is not None: - payload = {'kit_id': self.kit_id} +# payload = {'kit_id': self.kit_id} - payload_json = dumps(payload) - response = patch(f'{self.API_BASE_URL}{self.id}', - data = payload_json, headers = headers) +# payload_json = dumps(payload) +# response = patch(f'{self.API_BASE_URL}{self.id}', +# data = payload_json, headers = headers) - if response.status_code == 200 or response.status_code == 201: - std_out(f'Kit ID for device {self.id} was updated to {self.kit_id}', 'SUCCESS') - return True +# if response.status_code == 200 or response.status_code == 201: +# std_out(f'Kit ID for device {self.id} was updated to {self.kit_id}', 'SUCCESS') +# return True - std_out(f'Problem while updating kit ID for device {self.id}') +# std_out(f'Problem while updating kit ID for device {self.id}') - return False +# return False - def get_device_last_reading(self, update = False): +# def get_device_last_reading(self, update = False): - if self.last_reading_at is None or update: - if self.get_device_json(update) is not None and self.get_device_json(update)['state'] != 'never_published': - self.last_reading_at = localise_date(self.devicejson['last_reading_at'], 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') +# if self.last_reading_at is None or update: +# if self.get_device_json(update) is not None and self.get_device_json(update)['state'] != 'never_published': +# self.last_reading_at = localise_date(self.devicejson['last_reading_at'], 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') - std_out ('Device {} has last reading at {}'.format(self.id, self.last_reading_at)) +# std_out ('Device {} has last reading at {}'.format(self.id, self.last_reading_at)) - return self.last_reading_at +# return self.last_reading_at - def get_device_added_at(self, update = False): +# def get_device_added_at(self, update = False): - if self.added_at is None or update: - if self.get_device_json(update) is not None: - self.added_at = localise_date(self.devicejson['added_at'], 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') +# if self.added_at is None or update: +# if self.get_device_json(update) is not None: +# self.added_at = localise_date(self.devicejson['added_at'], 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') - std_out ('Device {} was added at {}'.format(self.id, self.added_at)) +# std_out ('Device {} was added at {}'.format(self.id, self.added_at)) - return self.added_at +# return self.added_at - def get_device_postprocessing(self, update = False): +# def get_device_postprocessing(self, update = False): - if self.postprocessing is None or update: - if self.get_device_json(update) is not 
None: - self.postprocessing = self.devicejson['postprocessing'] +# if self.postprocessing is None or update: +# if self.get_device_json(update) is not None: +# self.postprocessing = self.devicejson['postprocessing'] - if self.postprocessing is not None: - # Check the url in hardware - if 'hardware_url' in self.postprocessing: - urls = url_checker(self.postprocessing['hardware_url']) - # If URL is empty, try prepending base url from config - if not urls: - tentative_url = f"{config._base_postprocessing_url}hardware/{self.postprocessing['hardware_url']}.{config._default_file_type}" - else: - if len(urls)>1: std_out('URLs for postprocessing recipe are more than one, trying first', 'WARNING') - tentative_url = urls[0] +# if self.postprocessing is not None: +# # Check the url in hardware +# if 'hardware_url' in self.postprocessing: +# urls = url_checker(self.postprocessing['hardware_url']) +# # If URL is empty, try prepending base url from config +# if not urls: +# tentative_url = f"{config._base_postprocessing_url}hardware/{self.postprocessing['hardware_url']}.{config._default_file_type}" +# else: +# if len(urls)>1: std_out('URLs for postprocessing recipe are more than one, trying first', 'WARNING') +# tentative_url = urls[0] - self.postprocessing['hardware_url'] = tentative_url +# self.postprocessing['hardware_url'] = tentative_url - std_out ('Device {} has postprocessing information:\n{}'.format(self.id, self.postprocessing)) - else: - std_out (f'Device {self.id} has no postprocessing information') +# std_out ('Device {} has postprocessing information:\n{}'.format(self.id, self.postprocessing)) +# else: +# std_out (f'Device {self.id} has no postprocessing information') - return self.postprocessing +# return self.postprocessing - def get_device_timezone(self, update = False): +# def get_device_timezone(self, update = False): - if self.timezone is None or update: - latitude, longitude = self.get_device_lat_long(update) - # Localize it +# if self.timezone is None or update: +# latitude, longitude = self.get_device_lat_long(update) +# # Localize it - if latitude is not None and longitude is not None: - # self.timezone = tz_where.tzNameAt(latitude, longitude, forceTZ=True) - self.timezone = tf.timezone_at(lng=longitude, lat=latitude) - std_out ('Device {} timezone is {}'.format(self.id, self.timezone)) +# if latitude is not None and longitude is not None: +# # self.timezone = tz_where.tzNameAt(latitude, longitude, forceTZ=True) +# self.timezone = tf.timezone_at(lng=longitude, lat=latitude) +# std_out ('Device {} timezone is {}'.format(self.id, self.timezone)) - return self.timezone +# return self.timezone - def get_device_lat_long(self, update = False): +# def get_device_lat_long(self, update = False): - if self.lat is None or self.long is None or update: - if self.get_device_json(update) is not None: - latidude = longitude = None - if 'location' in self.devicejson.keys(): - latitude, longitude = self.devicejson['location']['latitude'], self.devicejson['location']['longitude'] - elif 'data' in self.devicejson.keys(): - if 'location' in self.devicejson['data'].keys(): - latitude, longitude = self.devicejson['data']['location']['latitude'], self.devicejson['data']['location']['longitude'] +# if self.lat is None or self.long is None or update: +# if self.get_device_json(update) is not None: +# latidude = longitude = None +# if 'location' in self.devicejson.keys(): +# latitude, longitude = self.devicejson['location']['latitude'], self.devicejson['location']['longitude'] +# elif 'data' in 
self.devicejson.keys(): +# if 'location' in self.devicejson['data'].keys(): +# latitude, longitude = self.devicejson['data']['location']['latitude'], self.devicejson['data']['location']['longitude'] - self.lat = latitude - self.long = longitude +# self.lat = latitude +# self.long = longitude - std_out ('Device {} is located at {}, {}'.format(self.id, self.lat, self.long)) +# std_out ('Device {} is located at {}, {}'.format(self.id, self.lat, self.long)) - return (self.lat, self.long) +# return (self.lat, self.long) - def get_device_alt(self, update = False): +# def get_device_alt(self, update = False): - if self.lat is None or self.long is None: - self.get_device_lat_long(update) +# if self.lat is None or self.long is None: +# self.get_device_lat_long(update) - if self.alt is None or update: - self.alt = get_elevation(_lat = self.lat, _long = self.long) +# if self.alt is None or update: +# self.alt = get_elevation(_lat = self.lat, _long = self.long) - std_out ('Device {} altitude is {}m'.format(self.id, self.alt)) +# std_out ('Device {} altitude is {}m'.format(self.id, self.alt)) - return self.alt +# return self.alt - def get_device_sensors(self, update = False): +# def get_device_sensors(self, update = False): - if self.sensors is None or update: - if self.get_device_json(update) is not None: - # Get available sensors in platform - sensors = self.devicejson['data']['sensors'] +# if self.sensors is None or update: +# if self.get_device_json(update) is not None: +# # Get available sensors in platform +# sensors = self.devicejson['data']['sensors'] - # Put the ids and the names in lists - self.sensors = dict() - for sensor in sensors: - for key in config.names['sc_sensor_names']: - if str(config.names['sc_sensor_names'][key]['id']) == str(sensor['id']): - # IDs are unique - if key in config._sc_ignore_keys: continue - self.sensors[sensor['id']] = key +# # Put the ids and the names in lists +# self.sensors = dict() +# for sensor in sensors: +# for key in config.names['sc_sensor_names']: +# if str(config.names['sc_sensor_names'][key]['id']) == str(sensor['id']): +# # IDs are unique +# if key in config._sc_ignore_keys: continue +# self.sensors[sensor['id']] = key - return self.sensors +# return self.sensors - def convert_rollup(self, frequency): - # Convert frequency from pandas to API's - for index, letter in enumerate(frequency): - try: - aux = int(letter) - except: - index_first = index - letter_first = letter - rollup_value = frequency[:index_first] - frequency_unit = frequency[index_first:] - break - - for item in config._freq_conv_lut: - if item[1] == frequency_unit: - rollup_unit = item[0] - break - - rollup = rollup_value + rollup_unit - return rollup - - def get_device_data(self, min_date = None, max_date = None, frequency = '1Min', clean_na = None, resample = True): - - if 'SC_ADMIN_BEARER' in environ: - std_out('Admin Bearer found, using it', 'SUCCESS') - - headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER']} - else: - headers = None - std_out('Admin Bearer not found', 'WARNING') - - std_out(f'Requesting data from SC API') - std_out(f'Device ID: {self.id}') - - rollup = self.convert_rollup(frequency) - std_out(f'Using rollup: {rollup}') - - # Make sure we have the everything we need beforehand - self.get_device_sensors() - self.get_device_timezone() - self.get_device_last_reading() - self.get_device_added_at() - self.get_kit_ID() - - if self.timezone is None: - std_out('Device does not have timezone set, skipping', 'WARNING') - return None - - # Check start date and end 
date - # Converting to UTC by passing None - # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.tz_convert.html - if min_date is not None: - min_date = localise_date(to_datetime(min_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') - std_out (f'Min Date: {min_date}') - else: - min_date = localise_date(to_datetime('2001-01-01'), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') - std_out(f"No min_date specified") - - if max_date is not None: - max_date = localise_date(to_datetime(max_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') - std_out (f'Max Date: {max_date}') - - # Trim based on actual data available - if min_date is not None and self.last_reading_at is not None: - if min_date > self.last_reading_at: - std_out(f'Device request would yield empty data (min_date). Returning', 'WARNING') - return None - - if max_date is not None and self.added_at is not None: - if max_date < self.added_at: - std_out(f'Device request would yield empty data (max_date). Returning', 'WARNING') - return None - - if max_date is not None and self.last_reading_at is not None: - if max_date > self.last_reading_at: - std_out('Trimming max_date to last reading', 'WARNING') - max_date = self.last_reading_at - - # Print stuff - std_out('Kit ID: {}'.format(self.kit_id)) - std_out(f'Device timezone: {self.timezone}') - if not self.sensors.keys(): - std_out(f'Device is empty') - return None - else: std_out(f'Sensor IDs: {list(self.sensors.keys())}') - - df = DataFrame() - std_out(f'Requesting from {min_date} to {max_date}') - - # Get devices in the sensor first - for sensor_id in self.sensors.keys(): - - # Request sensor per ID - request = self.API_BASE_URL + '{}/readings?'.format(self.id) - - if min_date is not None: request += f'from={min_date}' - if max_date is not None: request += f'&to={max_date}' - - request += f'&rollup={rollup}' - request += f'&sensor_id={sensor_id}' - request += '&function=avg' - - # Make request - response = get(request, headers = headers) - - # Retry once in case of 429 after 30s - if response.status_code == 429: - std_out('Too many requests, waiting for 1 more retry', 'WARNING') - sleep (30) - response = get(request, headers = headers) - - flag_error = False - try: - sensorjson = response.json() - except: - std_out(f'Problem with json data from API, {response.status_code}', 'ERROR') - flag_error = True - pass - continue - - if 'readings' not in sensorjson.keys(): - std_out(f'No readings key in request for sensor: {sensor_id} ({self.sensors[sensor_id]})', 'ERROR') - flag_error = True - continue - - elif sensorjson['readings'] == []: - std_out(f'No data in request for sensor: {sensor_id} ({self.sensors[sensor_id]})', 'WARNING') - flag_error = True - continue - - if flag_error: continue - - try: - dfsensor = DataFrame(sensorjson['readings']).set_index(0) - dfsensor.columns = [self.sensors[sensor_id]] - dfsensor.index = localise_date(dfsensor.index, self.timezone) - dfsensor.sort_index(inplace=True) - dfsensor = dfsensor[~dfsensor.index.duplicated(keep='first')] - - # Drop unnecessary columns - dfsensor.drop([i for i in dfsensor.columns if 'Unnamed' in i], axis=1, inplace=True) - # Check for weird things in the data - dfsensor = dfsensor.astype(float, errors='ignore') - # dfsensor = dfsensor.apply(to_numeric, errors='coerce') - # Resample - if (resample): - dfsensor = dfsensor.resample(frequency).mean() - df = df.combine_first(dfsensor) - except: - print_exc() - std_out('Problem with sensor data from API', 'ERROR') - flag_error = True - pass - continue - - try: - df = 
df.reindex(df.index.rename('TIME'))
-            df = clean(df, clean_na, how = 'all')
-            self.data = df
-
-        except:
-            std_out('Problem closing up the API dataframe', 'ERROR')
-            return None
-
-        if not flag_error: std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS')
-        return self.data
-
-    def post_device_data(self, clean_na = 'drop', chunk_size = 500):
-        '''
-        POST self.data to the SmartCitizen API
-        Parameters
-        ----------
-            clean_na: string, optional
-                'drop'
-                'drop', 'fill'
-            chunk_size: integer
-                chunk size to split resulting pandas DataFrame for posting readings
-        Returns
-        -------
-            True if the data was posted successfully
-        '''
-        if self.data is None:
-            std_out('No data to post, ignoring', 'ERROR')
-            return False
-
-        if 'SC_BEARER' not in environ:
-            std_out('Cannot post without Auth Bearer', 'ERROR')
-            return False
-
-        if 'SC_ADMIN_BEARER' in environ:
-            std_out('Using admin Bearer')
-            bearer = environ['SC_ADMIN_BEARER']
-        else:
-            bearer = environ['SC_BEARER']
-
-        headers = {'Authorization':'Bearer ' + bearer, 'Content-type': 'application/json'}
-        post_ok = True
-
-        for sensor_id in self.sensors:
-            df = DataFrame(self.data[self.sensors[sensor_id]]).copy()
-            post_ok &= self.post_data_to_device(df, clean_na = clean_na, chunk_size = chunk_size)
-
-        return post_ok
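A minimal usage sketch for the posting helper defined below. Everything here is illustrative: `dev` stands in for an already-constructed SC API device handler, '55' stands in for a platform sensor id, and SC_BEARER is assumed to be set in the environment.

    from pandas import DataFrame, to_datetime

    df = DataFrame({'55': [21.3, 21.5]},   # column names should match platform sensor ids
                   index=to_datetime(['2023-11-21 12:00:00+00:00',
                                      '2023-11-21 12:01:00+00:00']))
    # dry_run=True returns the serialised payload without posting anything
    payload = dev.post_data_to_device(df, clean_na='drop', dry_run=True)
    print(payload)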
-    def post_data_to_device(self, df, clean_na = 'drop', chunk_size = 500, dry_run = False, max_retries = 2):
-        '''
-        POST external pandas.DataFrame to the SmartCitizen API
-        Parameters
-        ----------
-            df: pandas DataFrame
-                Contains data in a DataFrame format.
-                Data is posted using the column names of the dataframe.
-                Data is posted in UTC, so the dataframe needs to have a localised timestamp.
-            clean_na: string, optional
-                'drop'
-                'drop', 'fill'
-            chunk_size: integer
-                chunk size to split resulting pandas DataFrame for posting readings
-            dry_run: boolean
-                False
-                Post the payload to the API or just return it
-            max_retries: int
-                2
-                Maximum number of retries per chunk
-        Returns
-        -------
-            True if the data was posted successfully
-        '''
-        if 'SC_BEARER' not in environ:
-            std_out('Cannot post without Auth Bearer', 'ERROR')
-            return False
-
-        if 'SC_ADMIN_BEARER' in environ:
-            std_out('Using admin Bearer')
-            bearer = environ['SC_ADMIN_BEARER']
-        else:
-            bearer = environ['SC_BEARER']
-
-        headers = {'Authorization':'Bearer ' + bearer, 'Content-type': 'application/json'}
-
-        # Clean df of nans
-        df = clean(df, clean_na, how = 'all')
-        std_out(f'Posting columns to {self.API_BASE_URL}')
-        std_out(f'{list(df.columns)}')
-        df.index.name = 'recorded_at'
-
-        # Split the dataframe in chunks
-        std_out(f'Splitting post in chunks of size {chunk_size}')
-        chunked_dfs = [df[i:i+chunk_size] for i in range(0, df.shape[0], chunk_size)]
-
-        for i in trange(len(chunked_dfs), file=sys.stdout,
-                        desc=f"Posting data for {self.id}..."):
-
-            chunk = chunked_dfs[i].copy()
-
-            # Prepare json post
-            payload = {"data":[]}
-            for item in chunk.index:
-                payload["data"].append(
-                    {
-                        "recorded_at": localise_date(item, 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ'),
-                        "sensors": [{
-                            "id": column,
-                            "value": chunk.loc[item, column]
-                        } for column in chunk.columns if not isnan(chunk.loc[item, column])]
-                    }
-                )
-
-            if dry_run:
-                std_out(f'Dry run request to: {self.API_BASE_URL}{self.id}/readings for chunk ({i+1}/{len(chunked_dfs)})')
-                return dumps(payload, indent = 2, cls = NpEncoder)
-
-            post_ok = False
-            retries = 0
-
-            while not post_ok and retries < max_retries:
-                response = post(f'{self.API_BASE_URL}{self.id}/readings',
-                                data = dumps(payload, cls = NpEncoder), headers = headers)
-
-                if response.status_code == 200 or response.status_code == 201:
-                    post_ok = True
-                    break
-                else:
-                    retries += 1
-                    std_out(f'Chunk ({i+1}/{len(chunked_dfs)}) post failed. \
-                        API responded {response.status_code}. \
-                        Retrying ({retries}/{max_retries})', 'WARNING')
-
-            if (not post_ok) or (retries == max_retries):
-                std_out(f'Chunk ({i+1}/{len(chunked_dfs)}) post failed. \
-                    API responded {response.status_code}. \
-                    Reached max_retries', 'ERROR')
-                return False
-
-        return True
-
-    def patch_postprocessing(self, dry_run = False):
-        '''
-        PATCH postprocessing info into the device in the SmartCitizen API.
-        Updates all the postprocessing info. Changes need to be made to the keys of the postprocessing outside of here.
-
-        # Example postprocessing:
-        # {
-        #   "blueprint_url": "https://github.com/fablabbcn/smartcitizen-data/blob/master/blueprints/sc_21_station_module.json",
-        #   "hardware_url": "https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/master/hardware/SCAS210001.json",
-        #   "latest_postprocessing": "2020-10-29T08:35:23Z"
-        # }
-        '''
-
-        if 'SC_ADMIN_BEARER' not in environ:
-            std_out('Cannot post without Admin Auth Bearer', 'ERROR')
-            return
-
-        headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER'],
-                   'Content-type': 'application/json'}
-
-        payload = {"postprocessing_attributes": self.postprocessing}
-        payload_json = dumps(payload)
-
-        if dry_run:
-            std_out(f'Dry run request to: {self.API_BASE_URL}{self.id}/')
-            return dumps(payload, indent = 2)
-
-        std_out(f'Posting postprocessing_attributes:\n {payload_json}')
-        response = patch(f'{self.API_BASE_URL}{self.id}/',
-                         data = payload_json, headers = headers)
-
-        if response.status_code == 200 or response.status_code == 201:
-            std_out("Postprocessing posted", "SUCCESS")
-            return True
-        else:
-            std_out(f"API responded with {response.status_code}")
-
-        return False
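The dumps(..., cls = NpEncoder) calls above rely on a JSON encoder that downcasts numpy types, which json.dumps cannot serialise natively. The library ships its own NpEncoder; this is only a minimal sketch of what such an encoder does:

    import json
    from numpy import integer, floating, ndarray

    class NpEncoder(json.JSONEncoder):
        # Downcast numpy scalars and arrays to plain Python types
        def default(self, obj):
            if isinstance(obj, integer): return int(obj)
            if isinstance(obj, floating): return float(obj)
            if isinstance(obj, ndarray): return obj.tolist()
            return super().default(obj)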
-class MuvApiDevice:
-
-    API_BASE_URL='https://data.waag.org/api/muv/'
-
-    def __init__ (self, did):
-        self.id = did
-        self.timezone = None
-        self.data = None
-        self.sensors = None
-
-    def get_device_timezone(self):
-        self.timezone = 'Europe/Madrid'
-        return self.timezone
-
-    def get_device_sensors(self):
-        if self.sensors is None:
-            self.sensors = dict()
-            for key in config.blueprints:
-                if 'muv' not in key: continue
-                if 'sensors' in config.blueprints[key]:
-                    for sensor_name in config.blueprints[key]['sensors'].keys():
-                        # IDs are unique
-                        self.sensors[config.blueprints[key]['sensors'][sensor_name]['id']] = sensor_name
-        return self.sensors
-
-    def get_device_data(self, min_date = None, max_date = None, frequency = '3Min', clean_na = None, resample = True):
-
-        if min_date is not None: days_ago = (to_datetime(date.today())-to_datetime(min_date)).days
-        else: days_ago = 365 # One year of data
-
-        std_out(f'Requesting data from MUV API')
-        std_out(f'Device ID: {self.id}')
-        self.get_device_timezone()
-        self.get_device_sensors()
-
-        # Get device data
-        try:
-            if days_ago == -1: url = f'{self.API_BASE_URL}getSensorData?sensor_id={self.id}'
-            else: url = f'{self.API_BASE_URL}getSensorData?sensor_id={self.id}&days={days_ago}'
-            df = DataFrame(get(url).json())
-        except:
-            print_exc()
-            std_out('Failed sensor request. Probably no connection', 'ERROR')
-            return None
-
-        try:
-            # Rename columns
-            df.rename(columns = self.sensors, inplace = True)
-            df = df.set_index('time')
-
-            df.index = localise_date(df.index, self.timezone)
-            df = df[~df.index.duplicated(keep='first')]
-            # Drop unnecessary columns
-            df.drop([i for i in df.columns if 'Unnamed' in i], axis=1, inplace=True)
-            df.drop('id', axis=1, inplace=True)
-            # Check for weird things in the data
-            df = df.apply(to_numeric, errors='coerce')
-            # Resample
-            if (resample):
-                df = df.resample(frequency).mean()
-            df = df.reindex(df.index.rename('TIME'))
-
-            df = clean(df, clean_na, how = 'all')
-
-            self.data = df
-
-        except:
-            print_exc()
-            std_out('Problem closing up the API dataframe', 'ERROR')
-            return None
-
-        std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS')
-        return self.data
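The MUV endpoint above is keyed on a days-ago window rather than a from/to range. A standalone sketch of the equivalent request (the sensor id and dates are illustrative):

    from datetime import date
    from pandas import DataFrame, to_datetime
    from requests import get

    min_date = '2023-11-01'
    days_ago = (to_datetime(date.today()) - to_datetime(min_date)).days
    url = f'https://data.waag.org/api/muv/getSensorData?sensor_id=some_id&days={days_ago}'
    raw = DataFrame(get(url).json())   # raw records, indexed by 'time' downstream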
-class DadesObertesApiDevice:
-
-    API_BASE_URL="https://analisi.transparenciacatalunya.cat/resource/tasf-thgu.json"
-
-    def __init__ (self, did = None, within = None):
-        if did is None and within is None:
-            std_out('Specify either station id (=codi_eoi) or within (=(lat, long, radius_meters))')
-            return
-
-        if did is not None: self.id = did
-        if within is not None: self.id = self.get_id_from_within(within)
-
-        self.timezone = None
-        self.data = None
-        self.sensors = None
-        self.devicejson = None
-        self.lat = None
-        self.long = None
-        self.alt = None
-
-    @staticmethod
-    def get_world_map(city = None, within = None, station_type = None, area_type = None, full = False):
-        """
-        Gets devices from the Dades Obertes API with certain requirements
-        Parameters
-        ----------
-            city: string, optional
-                Empty string
-                City
-            within: tuple
-                Empty tuple
-                Gets the devices within a circle centered on (lat, long) with a radius in meters
-                within = tuple(lat, long, radius_meters)
-            station_type: string
-                None
-                Type of station, to choose from: 'background', nan or 'traffic'
-            area_type: string
-                None
-                Type of area, to choose from: nan, 'peri-urban', 'rural', 'suburban', 'urban'
-            full: bool
-                False
-                Return full dataframe or not
-        Returns
-        -------
-            A list of eoi codes that comply with the requirements. If no requirements are set, returns all of them
-        """
-        def is_within_circle(x, within):
-            if isnan(x['latitud']): return False
-            if isnan(x['longitud']): return False
-
-            return distance(location_A=(within[0], within[1]), location_B=(x['latitud'], x['longitud'])).m < within[2]
-
-        world_map = get("https://analisi.transparenciacatalunya.cat/resource/tasf-thgu.json")
-        df = read_json(StringIO(world_map.content.decode('utf-8'))).set_index('codi_eoi')
-
-        # Location
-        if city is not None: df=df[(df['municipi']==city)]
-        if within is not None:
-            df['within'] = df.apply(lambda x: is_within_circle(x, within), axis=1)
-            df=df[(df['within']==True)]
-
-        # Station type
-        if station_type is not None: df=df[(df['tipus_estacio']==station_type)]
-        # Area type
-        if area_type is not None: df=df[(df['area_urbana']==area_type)]
-
-        if full: return df
-        return list(set(list(df.index)))
-
-    def get_id_from_within(self, within):
-        '''
-        Gets the stations within a radius in meters.
-        within = tuple(lat, long, radius_meters)
-        '''
-        request = self.API_BASE_URL
-        request += f'?$where=within_circle(geocoded_column,{within[0]},{within[1]},{within[2]})'
-
-        try:
-            s = get(request)
-        except:
-            std_out('Problem with request from API', 'ERROR')
-            return None
-
-        if s.status_code == 200 or s.status_code == 201:
-            df = read_json(StringIO(s.content.decode('utf-8')))
-        else:
-            std_out('API reported {}'.format(s.status_code), 'ERROR')
-            return None
-
-        if 'codi_eoi' in df.columns:
-            ids = list(set(df.codi_eoi.values))
-            if ids == []:
-                std_out('No stations within range', 'ERROR')
-                return None
-            elif len(ids) > 1:
-                for ptid in ids:
-                    municipi = next(iter(set(df[df.codi_eoi==ptid].municipi.values)))
-                    nom_estacio = next(iter(set(df[df.codi_eoi==ptid].nom_estacio.values)))
-                    area_urbana = next(iter(set(df[df.codi_eoi==ptid].area_urbana.values)))
-                    tipus_estacio = next(iter(set(df[df.codi_eoi==ptid].tipus_estacio.values)))
-
-                    std_out(f'{ids.index(ptid)+1} /- {ptid} --- {municipi} - {nom_estacio} - Type: {area_urbana} - {tipus_estacio}')
-
-                wptid = int(input('Multiple stations found, please select one: ')) - 1
-
-                devid = ids[wptid]
-                std_out(f'Selected station in {next(iter(set(df[df.codi_eoi==devid].municipi.values)))} with codi_eoi={devid}')
-            else:
-                devid = ids[0]
-                municipi = next(iter(set(df[df.codi_eoi==devid].municipi.values)))
-                nom_estacio = next(iter(set(df[df.codi_eoi==devid].nom_estacio.values)))
-                area_urbana = next(iter(set(df[df.codi_eoi==devid].area_urbana.values)))
-                tipus_estacio = next(iter(set(df[df.codi_eoi==devid].tipus_estacio.values)))
-                std_out(f'Found station in {municipi} - {nom_estacio} - {devid} - Type: {area_urbana} - {tipus_estacio}')
-
-        else:
-            std_out('Data is empty', 'ERROR')
-            return None
-
-        return devid
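The lookup above uses the Socrata SoQL within_circle() function (point column, latitude, longitude, radius in meters). A standalone sketch of the same query against the endpoint declared above, with illustrative coordinates for central Barcelona:

    from io import StringIO
    from pandas import read_json
    from requests import get

    base = 'https://analisi.transparenciacatalunya.cat/resource/tasf-thgu.json'
    lat, lon, radius = 41.3874, 2.1686, 2000   # illustrative values
    r = get(f'{base}?$where=within_circle(geocoded_column,{lat},{lon},{radius})')
    stations = read_json(StringIO(r.content.decode('utf-8')))
    print(sorted(set(stations.codi_eoi)))      # candidate station ids in range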
-    def get_device_sensors(self):
-
-        if self.sensors is None:
-            if self.get_device_json() is not None:
-                # Get available sensors
-                sensors = list(set(self.devicejson.contaminant))
-
-                # Put the ids and the names in lists
-                self.sensors = dict()
-                for sensor in sensors:
-                    for key in config.blueprints:
-                        if not search("csic_station", key): continue
-                        if 'sensors' in config.blueprints[key]:
-                            for sensor_name in config.blueprints[key]['sensors'].keys():
-                                if config.blueprints[key]['sensors'][sensor_name]['id'] == str(sensor):
-                                    # IDs are unique
-                                    self.sensors[sensor] = sensor_name
-
-        return self.sensors
-
-    def get_device_json(self):
-
-        if self.devicejson is None:
-            try:
-                s = get(self.API_BASE_URL + f'/?codi_eoi={self.id}')
-                if s.status_code == 200 or s.status_code == 201:
-                    self.devicejson = read_json(StringIO(s.content.decode('utf-8')))
-                else:
-                    std_out('API reported {}'.format(s.status_code), 'ERROR')
-            except:
-                std_out('Failed request. Probably no connection', 'ERROR')
-
-        return self.devicejson
-
-    def get_device_timezone(self):
-
-        if self.timezone is None:
-            latitude, longitude = self.get_device_lat_long()
-            # Localize it
-            # self.timezone = tz_where.tzNameAt(latitude, longitude)
-            self.timezone = tf.timezone_at(lng=longitude, lat=latitude)
-
-        std_out ('Device {} timezone is {}'.format(self.id, self.timezone))
-
-        return self.timezone
-
-    def get_device_alt(self, update = False):
-
-        if self.lat is None or self.long is None:
-            self.get_device_lat_long()
-
-        if self.alt is None or update:
-            self.alt = get_elevation(_lat = self.lat, _long = self.long)
-
-        std_out ('Device {} altitude is {}m'.format(self.id, self.alt))
-
-        return self.alt
-
-    def get_device_lat_long(self):
-
-        if self.lat is None or self.long is None:
-            if self.get_device_json() is not None:
-                latitude = longitude = None
-                if 'latitud' in self.devicejson.columns:
-                    latitude = next(iter(set(self.devicejson.latitud)))
-                    longitude = next(iter(set(self.devicejson.longitud)))
-
-                self.lat = latitude
-                self.long = longitude
-
-            std_out ('Device {} is located at {}, {}'.format(self.id, latitude, longitude))
-
-        return (self.lat, self.long)
-
-    def get_device_data(self, min_date = None, max_date = None, frequency = '1H', clean_na = None, resample = True):
-        '''
-        Based on a code snippet from Marc Roig:
-        # I2CAT RESEARCH CENTER - BARCELONA - MARC ROIG (marcroig@i2cat.net)
-        '''
-
-        std_out(f'Requesting data from Dades Obertes API')
-        std_out(f'Device ID: {self.id}')
-        self.get_device_sensors()
-        self.get_device_timezone()
-
-        request = self.API_BASE_URL
-        request += f'/?codi_eoi={self.id}'
-
-        if min_date is not None and max_date is not None:
-            request += "&$where=data between " + to_datetime(min_date).strftime("'%Y-%m-%dT%H:%M:%S'") \
-                + " and " + to_datetime(max_date).strftime("'%Y-%m-%dT%H:%M:%S'")
-        elif min_date is not None:
-            request += "&$where=data >= " + to_datetime(min_date).strftime("'%Y-%m-%dT%H:%M:%S'")
-        elif max_date is not None:
-            request += "&$where=data < " + to_datetime(max_date).strftime("'%Y-%m-%dT%H:%M:%S'")
-
-        try:
-            s = get(request)
-        except:
-            print_exc()
-            std_out('Problem with sensor data from API', 'ERROR')
-            return None
-
-        if s.status_code == 200 or s.status_code == 201:
-            df = read_json(StringIO(s.content.decode('utf-8')))
-        else:
-            std_out('API reported {}'.format(s.status_code), 'ERROR')
-            return None
-
-        # Filter columns
-        measures = ['h0' + str(i) for i in range(1,10)]
-        measures += ['h' + str(i) for i in range(10,25)]
-        # validations = ['v0' + str(i) for i in range(1,10)]
-        # validations += ['v' + str(i) for i in range(10,25)]
-        new_measures_names = list(range(1,25))
-
-        columns = ['contaminant', 'data'] + measures # + validations
-        try:
-            df_subset = df[columns]
-            df_subset.columns = ['contaminant', 'date'] + new_measures_names
-        except:
-            print_exc()
-            std_out('Problem while filtering columns', 'ERROR')
-            return None
-        else:
-            std_out('Successful filtering', 'SUCCESS')
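# The pivot below reshapes each contaminant's 24 hourly columns (h01..h24) into a
# single datetime-indexed column. A minimal sketch of the same reshape, with
# illustrative data and an assumed contaminant name ('NO2'):
#
#   from pandas import DataFrame, to_datetime, DateOffset
#
#   wide = DataFrame({'date': ['2020-01-01'], 1: [10.0], 2: [12.0]})
#   long = wide.set_index('date').unstack().reset_index()
#   long.columns = ['hours', 'date', 'NO2']
#   long['date'] = long.apply(lambda x: to_datetime(x['date']) + DateOffset(hours=int(x['hours'])), axis=1)
#   print(long.set_index('date')['NO2'])   # values stamped at 01:00 and 02:00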
-        # Pivot
-        try:
-            df = DataFrame([])
-            for contaminant in self.sensors.keys():
-                if contaminant not in df_subset['contaminant'].values:
-                    std_out(f'{contaminant} not in columns. Skipping', 'WARNING')
-                    continue
-                df_temp = df_subset.loc[df_subset['contaminant']==contaminant].drop('contaminant', axis=1).set_index('date').unstack().reset_index()
-                df_temp.columns = ['hours', 'date', contaminant]
-                df_temp['date'] = to_datetime(df_temp['date'])
-                timestamp_lambda = lambda x: x['date'] + DateOffset(hours=int(x['hours']))
-                df_temp['date'] = df_temp.apply(timestamp_lambda, axis=1)
-                df_temp = df_temp.set_index('date')
-                df[contaminant] = df_temp[contaminant]
-        except:
-            # print_exc()
-            std_out('Problem while pivoting', 'ERROR')
-            return None
-        else:
-            std_out('Successful pivoting', 'SUCCESS')
-
-        df.index = to_datetime(df.index).tz_localize('UTC').tz_convert(self.timezone)
-        df.sort_index(inplace=True)
-
-        # Rename
-        try:
-            df.rename(columns=self.sensors, inplace=True)
-        except:
-            # print_exc()
-            std_out('Problem while renaming columns', 'ERROR')
-            return None
-        else:
-            std_out('Successful renaming', 'SUCCESS')
-
-        # Clean
-        df = df[~df.index.duplicated(keep='first')]
-        # Drop unnecessary columns
-        df.drop([i for i in df.columns if 'Unnamed' in i], axis=1, inplace=True)
-        # Check for weird things in the data
-        df = df.apply(to_numeric, errors='coerce')
-        # Resample
-        if (resample):
-            df = df.resample(frequency).mean()
-
-        try:
-            df = df.reindex(df.index.rename('TIME'))
-            df = clean(df, clean_na, how = 'all')
-            self.data = df
-        except:
-            std_out('Problem closing up the API dataframe', 'ERROR')
-            return None
-
-        std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS')
-        return self.data
-
-class NiluApiDevice(object):
-    """Device handler for the NILU sensors (iflink) API"""
-    API_BASE_URL='https://sensors.nilu.no/api/'
-    API_CONNECTOR='sensors.nilu.no'
-
-    # Docs
-    # https://sensors.nilu.no/api/doc#configure-sensor-schema
-    # https://sensors.nilu.no/api/doc#push--sensor-data-by-id
-
-    def __init__ (self, did):
-
-        self.id = did
-        self.timezone = None
-        self.lat = None
-        self.long = None
-        self.alt = None
-        self.data = None
-        self.sensors = None
-        self.devicejson = None
-        self.last_reading_at = None
-        self.added_at = None
-        self._api_url = self.API_BASE_URL + f'sensors/{self.id}'
-
-    @property
-    def api_url(self):
-        return self._api_url
-
-    @staticmethod
-    # def new_device(name, description = '', resolution = '1Min', epsg = config._epsg, enabled = True, location = None, sensors = None, dry_run = False):
-    def new_device(name, location = {}, dry_run = False, **kwargs):
-        '''
-        Configures the device as a new sensor schema.
-        This is a one-time configuration and shouldn't need to be run repeatedly.
-        More information at: https://sensors.nilu.no/api/doc#configure-sensor-schema
-
-        Parameters
-        ----------
-            name: string
-                Device name
-            location: dict
-                Empty dict
-                Sensor location. If the sensor is moving (i.e. its position is not fixed),
-                then location must explicitly be set to an empty object: {} when configured.
-                location = {
-                    'longitude': longitude (double) – sensor east-west position,
-                    'latitude': latitude (double) – sensor north-south position,
-                    'altitude': altitude (double) – sensor height above sea level
-                }
-            dry_run: boolean
-                False
-                Post the payload to the API or just return it
-            **kwargs
-            ------
-            description: string, optional
-                ''
-                sensor description
-            frequency: string, optional
-                '1Min'
-                pandas formatted frequency
-            epsg: int, optional
-                4326
-                SRS EPSG code. Defaults to 4326 (WGS84).
More info https://spatialreference.org/ - enabled: boolean, optional - True - flag indicating if sensor is enabled for data transfer - - sensors: dict() - Dictionary containing necessary information of the sensors to be stored. scdata format: - { - 'SHORT_NAME': { - 'desc': 'Channel description', - 'id': 'sensor SC platform id', - 'units': 'sensor_recording_units' - }, - ... - } - ------ - - Returns - ------- - If dry_run, prints out a dict containing the payload and - returns False - If not, either False in case of error or a - dictionary containing: - sensorid (int) – sensor identifier - message (string) – HTTP status text - http-status-code (int) – HTTP status code - atom (string) – atom URL to sensor - ''' - - API_BASE_URL='https://sensors.nilu.no/api/' - API_CONNECTOR='nilu' - - if API_CONNECTOR not in config.connectors: - std_out(f'No connector for {API_CONNECTOR}', 'ERROR') - return False - - if 'NILU_BEARER' not in environ: - std_out('Cannot configure without Auth Bearer', 'ERROR') - return False - - headers = {'Authorization':'Bearer ' + environ['NILU_BEARER'], 'Content-type': 'application/json'} - - if name is None: - std_out('Need a name to create a new sensor', 'ERROR') - return False - std_out (f'Configuring IFLINK device named {name}') - - # Verify inputs - flag_error = False - - dft_input_params = ['epsg', 'description', 'frequency', 'enabled', 'sensors'] - if any([x not in kwargs for x in dft_input_params]): - std_out('Input params not ok for NiluApiDevice', 'ERROR') - return False - - # EPSG int type - try: - epsg = int(kwargs['epsg']) - except: - std_out('Could not convert epsg to int', 'ERROR') - flag_error = True - pass - - # Resolution in seconds - if not flag_error: - try: - resolution_seconds = to_timedelta(kwargs['frequency']).seconds - except: - std_out('Could not convert resolution to seconds', 'ERROR') - flag_error = True - pass - - # Location - if not flag_error: - try: - location['longitude'] - location['latitude'] - location['altitude'] - except KeyError: - std_out('Need latitude, longitude and altitude in location dict', 'ERROR') - flag_error = True - pass - - if flag_error: return False - - # Construct payload - payload = { - "name": name, - "description": kwargs['description'], - "resolution": resolution_seconds, - "srs": { - "epsg": epsg - }, - "enabled": kwargs['enabled'] - } - - payload['location'] = location - - parameters = [] - components = [] - - # Construct - sensors = kwargs['sensors'] - for sensor in sensors.keys(): - # Check if it's in the configured connectors - _sid = str(sensors[sensor]['id']) - - if _sid is None: - std_out(f"Sensor {sensor} id is None. 
Ignoring", "WARNING") - return False - - if _sid not in config.connectors[API_CONNECTOR]['sensors']: - if config._strict: - std_out(f"Sensor {sensor} not found in connectors list", "ERROR") - return False - std_out(f"Sensor {sensor} not found in connectors list", "WARNING") - continue - - units = sensors[sensor]['units'] - - _pjson = { - "name": sensor, - "type": "double", - "doc": f"{sensors[sensor]['desc']} in {units}" - } - - _cjson = { - "componentid": config.connectors[API_CONNECTOR]['sensors'][_sid]['id'], - "unitid": config.connectors[API_CONNECTOR]['sensors'][_sid]['unitid'], - "binding-path": f"/{sensor}", - "level": config.connectors[API_CONNECTOR]['sensors'][_sid]['level'] - } - - parameters.append(_pjson) - components.append(_cjson) - # Add timestamp as long - parameters.append({ - 'name': 'date', - 'type': 'long', - 'doc': 'Date of measurement' - }) - - # Add the converter (we need to push as input-format) - converters = [{ - "input-type": "string", - "output-type": "StringEpochTime", - "target-path": "/date", - "input-args": { - "input-format": "yyyy-MM-ddTHH:mm:ssZ" - } - }] - - mapping = [{ - "name": "Timestamp", - "target-path": "/date" - }] - - payload['parameters'] = parameters - payload['components'] = components - payload['converters'] = converters - payload['mapping'] = mapping - - if dry_run: - std_out(f'Dry run request to: {API_BASE_URL}sensors/configure') - print(dumps(payload, indent = 2)) - return False - - response = post(f'{API_BASE_URL}sensors/configure', - data = dumps(payload), headers = headers) - - - if response.status_code == 200 or response.status_code == 201: - if 'sensorid' in response.json(): - platform_id = str(response.json()['sensorid']) - platform_url = "https://sensors.nilu.no/api/sensors/" + platform_id - std_out(f'Device created with: \n{platform_url}', 'SUCCESS') - return response.json() - else: - std_out('Response does not contain sensorid field') - else: - std_out(f'{API_BASE_URL} reported {response.status_code}:\n{response.json()}', 'ERROR') - return False - - def get_device_json(self, update = False): - ''' - https://sensors.nilu.no/api/doc#get--sensor-by-id - ''' - if 'NILU_BEARER' in environ: - std_out('Auth Bearer found, using it', 'SUCCESS') - headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']} - else: - std_out('Cannot request without bearer', 'ERROR') - return None - - if self.devicejson is None or update: - try: - deviceR = get(f'{self.API_BASE_URL}sensors/{self.id}') - if deviceR.status_code == 429: - std_out('API reported {}. Retrying once'.format(deviceR.status_code), - 'WARNING') - sleep(30) - deviceR = get(f'{self.API_BASE_URL}sensors/{self.id}', headers = headers) - - if deviceR.status_code == 200 or deviceR.status_code == 201: - self.devicejson = deviceR.json() - else: - std_out('API reported {}'.format(deviceR.status_code), 'ERROR') - except: - std_out('Failed request. 
Probably no connection', 'ERROR')
-
-        return self.devicejson
-
-    def get_device_description(self, update = False):
-        if self.get_device_json(update) is not None:
-            return self.get_device_json()['description']
-        return None
-
-    def get_device_lat_long(self, update = False):
-
-        if self.lat is None or self.long is None or update:
-            if self.get_device_json(update) is not None:
-                latitude = longitude = None
-                if 'location' in self.devicejson.keys():
-                    latitude, longitude = self.devicejson['location']['latitude'], self.devicejson['location']['longitude']
-
-                self.lat = latitude
-                self.long = longitude
-
-        std_out ('Device {} is located at {}, {}'.format(self.id, self.lat, self.long))
-
-        return (self.lat, self.long)
-
-    def get_device_alt(self, update = False):
-
-        if self.lat is None or self.long is None:
-            self.get_device_lat_long(update)
-
-        if self.alt is None or update:
-            self.alt = get_elevation(_lat = self.lat, _long = self.long)
-
-        std_out ('Device {} altitude is {}m'.format(self.id, self.alt))
-
-        return self.alt
-
-    def get_device_added_at(self, update = False):
-
-        if 'NILU_BEARER' in environ:
-            std_out('Auth Bearer found, using it', 'SUCCESS')
-            headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']}
-        else:
-            std_out('Cannot request without bearer', 'ERROR')
-            return None
-
-        if self.added_at is None or update:
-            try:
-                response = get(f'{self.API_BASE_URL}data/id/{self.id}/minutc', headers = headers)
-                if response.status_code == 429:
-                    std_out('API reported {}. Retrying once'.format(response.status_code),
-                            'WARNING')
-                    sleep(30)
-                    response = get(f'{self.API_BASE_URL}data/id/{self.id}/minutc', headers = headers)
-
-                if response.status_code == 200 or response.status_code == 201:
-                    last_json = response.json()
-                    first_readings = []
-                    for item in last_json:
-                        if 'timestamp_from_epoch' in item: first_readings.append(item['timestamp_from_epoch'])
-
-                    self.added_at = localise_date(datetime.fromtimestamp(max(list(set(first_readings)))), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ')
-                else:
-                    std_out(f'API reported {response.status_code}: {response.json()}', 'ERROR')
-            except:
-                print_exc()
-                std_out('Failed request. Probably no connection', 'ERROR')
-
-        std_out ('Device {} was added at {}'.format(self.id, self.added_at))
-
-        return self.added_at
-
-    def get_device_last_reading(self, update = False):
-        if 'NILU_BEARER' in environ:
-            std_out('Auth Bearer found, using it', 'SUCCESS')
-            headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']}
-        else:
-            std_out('Cannot request without bearer', 'ERROR')
-            return None
-
-        if self.last_reading_at is None or update:
-            try:
-                response = get(f'{self.API_BASE_URL}data/id/{self.id}/maxutc', headers = headers)
-                if response.status_code == 429:
-                    std_out('API reported {}. Retrying once'.format(response.status_code),
-                            'WARNING')
-                    sleep(30)
-                    response = get(f'{self.API_BASE_URL}data/id/{self.id}/maxutc', headers = headers)
-
-                if response.status_code == 200 or response.status_code == 201:
-                    last_json = response.json()
-                    last_readings = []
-                    for item in last_json:
-                        if 'timestamp_from_epoch' in item: last_readings.append(item['timestamp_from_epoch'])
-
-                    self.last_reading_at = localise_date(datetime.fromtimestamp(max(list(set(last_readings)))), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ')
-                else:
-                    std_out(f'API reported {response.status_code}: {response.json()}', 'ERROR')
-            except:
-                print_exc()
-                std_out('Failed request. Probably no connection', 'ERROR')
-
-        std_out ('Device {} has last reading at {}'.format(self.id, self.last_reading_at))
-
-        return self.last_reading_at
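The minutc/maxutc responses above carry one epoch timestamp per component, and the device-level timestamp is taken over the deduplicated set. The core conversion, sketched with the standard library only (passing an explicit UTC tz avoids depending on the machine's local timezone; the values are illustrative):

    from datetime import datetime, timezone

    epochs = [1603961723, 1603961800, 1603961800]   # illustrative timestamp_from_epoch values
    latest = datetime.fromtimestamp(max(set(epochs)), tz=timezone.utc)
    print(latest.strftime('%Y-%m-%dT%H:%M:%SZ'))    # ISO 8601, UTC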
-    def get_device_timezone(self, update = False):
-
-        if self.timezone is None or update:
-            latitude, longitude = self.get_device_lat_long(update)
-            # Localize it
-            if latitude is not None and longitude is not None:
-                # self.timezone = tz_where.tzNameAt(latitude, longitude, forceTZ=True)
-                self.timezone = tf.timezone_at(lng=longitude, lat=latitude)
-
-        std_out ('Device {} timezone is {}'.format(self.id, self.timezone))
-
-        return self.timezone
-
-    def get_device_sensors(self, update = False):
-
-        if self.sensors is None or update:
-            if self.get_device_json(update) is not None:
-                # Get available sensors
-                sensors = self.devicejson['components']
-                # Put the ids and the names in lists
-                self.sensors = dict()
-                for sensor in sensors:
-                    self.sensors[sensor['id']] = sensor['binding-path'][1:]
-
-        return self.sensors
-
-    def get_device_data(self, min_date = None, max_date = None, frequency = '1Min', clean_na = None, resample = True):
-        '''
-        From
-        https://sensors.nilu.no/api/doc#get--data-from-utc-timestamp-by-id
-        From-to
-        https://sensors.nilu.no/api/doc#get--data-from-utc-timestamp-range-by-id
-        '''
-
-        if 'NILU_BEARER' in environ:
-            std_out('Auth Bearer found, using it', 'SUCCESS')
-            headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']}
-        else:
-            std_out('Cannot request without bearer', 'ERROR')
-            return None
-
-        std_out(f'Requesting data from {self.API_BASE_URL}')
-        std_out(f'Device ID: {self.id}')
-
-        # Make sure we have everything we need beforehand
-        self.get_device_sensors()
-        self.get_device_timezone()
-        # This is not available yet
-        # self.get_device_added_at()
-        self.get_device_last_reading()
-
-        if self.timezone is None:
-            std_out('Device does not have timezone set, skipping', 'WARNING')
-            return None
-
-        # Check start date and end date
-        if min_date is not None:
-            min_date = localise_date(to_datetime(min_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ')
-            std_out (f'Min Date: {min_date}')
-        else:
-            std_out(f"No min_date specified, requesting all", 'WARNING')
-            # min_date = localise_date(to_datetime(self.added_at), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ')
-
-        if max_date is not None:
-            max_date = localise_date(to_datetime(max_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ')
-            std_out (f'Max Date: {max_date}')
-        else:
-            std_out(f"No max_date specified")
-
-        # Trim based on actual data available
-        if min_date is not None and self.last_reading_at is not None:
-            if min_date > self.last_reading_at:
-                std_out(f'Device request would yield empty data (min_date). Returning', 'WARNING')
-                return None
-
-        if max_date is not None and self.added_at is not None:
-            if max_date < self.added_at:
-                std_out(f'Device request would yield empty data (max_date).
Returning', 'WARNING') - return None - - if max_date is not None and self.last_reading_at is not None: - if max_date > self.last_reading_at: - std_out('Trimming max_date to last reading', 'WARNING') - max_date = self.last_reading_at - - # Print stuff - std_out(f'Device timezone: {self.timezone}') - if not self.sensors.keys(): - std_out(f'Device is empty') - return None - else: std_out(f'Sensor IDs: {list(self.sensors.keys())}') - - df = DataFrame() - - # Request sensor per ID - request = f'{self.API_BASE_URL}data/id/{self.id}/' - - if min_date is not None: request += f'fromutc/{min_date}/' - if max_date is not None: request += f'toutc/{max_date}' - - # Make request - response = get(request, headers = headers) - - # Retry once in case of 429 after 30s - if response.status_code == 429: - std_out('Too many requests, waiting for 1 more retry', 'WARNING') - sleep (30) - response = get(request, headers = headers) - - df = DataFrame(response.json()).pivot(index='timestamp_from_epoch', columns='component', values='value') - df.columns.name = None - df.index = localise_date(to_datetime(df.index, unit='s'), self.timezone) - df = df.reindex(df.index.rename('TIME')) - - # Drop unnecessary columns - df.drop([i for i in df.columns if 'Unnamed' in i], axis=1, inplace=True) - # Check for weird things in the data - df = df.apply(to_numeric, errors='coerce') - # Resample - if (resample): - df = df.resample(frequency).mean() - df = clean(df, clean_na, how = 'all') - - # Rename columns - d = {} - for component in self.devicejson['components']: - if 'name' in component: d[component['name']]=self.sensors[component['id']] - df = df.rename(columns=d) - - self.data = df - - std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS') - return self.data - - def post_data_to_device(self, df, clean_na = 'drop', chunk_size = None, dry_run = False, max_retries = 2): - ''' - POST external data in the IFLINK API, following - https://sensors.nilu.no/api/doc#push--sensor-data-by-id - Parameters - ---------- - df: pandas DataFrame - Contains data in a DataFrame format. - Data is posted using the column name of the dataframe - Data is posted in UTC TZ so dataframe needs to have located - timestamp - clean_na: string, optional - 'drop' - 'drop', 'fill' - chunk_size: None (not used?) 
- chunk size to split resulting pandas DataFrame for posting readings - dry_run: boolean - False - Post the payload to the API or just return it - max_retries: int - 2 - Maximum number of retries per chunk - Returns - ------- - True if the data was posted succesfully - ''' - - if 'NILU_BEARER' not in environ: - std_out('Cannot post without Auth Bearer', 'ERROR') - return False - - headers = {'Authorization':'Bearer ' + environ['NILU_BEARER'], - 'Content-type': 'application/json'} - - # Clean df of nans - df = clean(df, clean_na, how = 'all') - - std_out(f'Posting columns to {self.API_BASE_URL}.') - std_out(f'Rest in schema are empty: {list(df.columns)}') - - # Fill with declared schema to avoid rejection by the API - self.get_device_sensors() - for sensor in self.sensors: - if self.sensors[sensor] not in df.columns: - df[self.sensors[sensor]] = nan - - # Split the dataframe in chunks - std_out(f'Splitting post in chunks of size {chunk_size}') - - for i in trange(len(df.index), file=sys.stdout, - desc=f"Posting data for {self.id}..."): - - row = DataFrame(df.loc[df.index[i],:]).T - # Prepare json post - payload = {} - payload['date'] = localise_date(df.index[i], 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') - - for column in row.columns: - payload[column] = row.loc[df.index[i], column] - - if dry_run: - std_out(f'Dry run request to: {self.API_BASE_URL}sensors/{self.id}/inbound') - return dumps(payload, indent = 2, cls = NpEncoder) - - post_ok = False - retries = 0 - - while post_ok == False and retries < max_retries: - - response = post(f'{self.API_BASE_URL}sensors/{self.id}/inbound', - data = dumps(payload, cls = NpEncoder), headers = headers) - - if response.status_code == 200 or response.status_code == 201: - post_ok = True - break - else: - retries += 1 - std_out (f'Chunk ({i+1}/{len(df.index)}) post failed. \ - API responded {response.status_code}.\ - Retrying ({retries}/{max_retries}', 'WARNING') - - if (not post_ok) or (retries == max_retries): - std_out (f'Chunk ({i+1}/{len(df.index)}) post failed. 
\ - API responded {response.status_code}.\ - Reached max_retries', 'ERROR') - return False - - return True +# def convert_rollup(self, frequency): +# # Convert frequency from pandas to API's +# for index, letter in enumerate(frequency): +# try: +# aux = int(letter) +# except: +# index_first = index +# letter_first = letter +# rollup_value = frequency[:index_first] +# frequency_unit = frequency[index_first:] +# break + +# for item in config._freq_conv_lut: +# if item[1] == frequency_unit: +# rollup_unit = item[0] +# break + +# rollup = rollup_value + rollup_unit +# return rollup + +# def get_device_data(self, min_date = None, max_date = None, frequency = '1Min', clean_na = None, resample = True): + +# if 'SC_ADMIN_BEARER' in environ: +# std_out('Admin Bearer found, using it', 'SUCCESS') + +# headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER']} +# else: +# headers = None +# std_out('Admin Bearer not found', 'WARNING') + +# std_out(f'Requesting data from SC API') +# std_out(f'Device ID: {self.id}') + +# rollup = self.convert_rollup(frequency) +# std_out(f'Using rollup: {rollup}') + +# # Make sure we have the everything we need beforehand +# self.get_device_sensors() +# self.get_device_timezone() +# self.get_device_last_reading() +# self.get_device_added_at() +# self.get_kit_ID() + +# if self.timezone is None: +# std_out('Device does not have timezone set, skipping', 'WARNING') +# return None + +# # Check start date and end date +# # Converting to UTC by passing None +# # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.tz_convert.html +# if min_date is not None: +# min_date = localise_date(to_datetime(min_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') +# std_out (f'Min Date: {min_date}') +# else: +# min_date = localise_date(to_datetime('2001-01-01'), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') +# std_out(f"No min_date specified") + +# if max_date is not None: +# max_date = localise_date(to_datetime(max_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%S') +# std_out (f'Max Date: {max_date}') + +# # Trim based on actual data available +# if min_date is not None and self.last_reading_at is not None: +# if min_date > self.last_reading_at: +# std_out(f'Device request would yield empty data (min_date). Returning', 'WARNING') +# return None + +# if max_date is not None and self.added_at is not None: +# if max_date < self.added_at: +# std_out(f'Device request would yield empty data (max_date). 
Returning', 'WARNING') +# return None + +# if max_date is not None and self.last_reading_at is not None: +# if max_date > self.last_reading_at: +# std_out('Trimming max_date to last reading', 'WARNING') +# max_date = self.last_reading_at + +# # Print stuff +# std_out('Kit ID: {}'.format(self.kit_id)) +# std_out(f'Device timezone: {self.timezone}') +# if not self.sensors.keys(): +# std_out(f'Device is empty') +# return None +# else: std_out(f'Sensor IDs: {list(self.sensors.keys())}') + +# df = DataFrame() +# std_out(f'Requesting from {min_date} to {max_date}') + +# # Get devices in the sensor first +# for sensor_id in self.sensors.keys(): + +# # Request sensor per ID +# request = self.API_BASE_URL + '{}/readings?'.format(self.id) + +# if min_date is not None: request += f'from={min_date}' +# if max_date is not None: request += f'&to={max_date}' + +# request += f'&rollup={rollup}' +# request += f'&sensor_id={sensor_id}' +# request += '&function=avg' + +# # Make request +# response = get(request, headers = headers) + +# # Retry once in case of 429 after 30s +# if response.status_code == 429: +# std_out('Too many requests, waiting for 1 more retry', 'WARNING') +# sleep (30) +# response = get(request, headers = headers) + +# flag_error = False +# try: +# sensorjson = response.json() +# except: +# std_out(f'Problem with json data from API, {response.status_code}', 'ERROR') +# flag_error = True +# pass +# continue + +# if 'readings' not in sensorjson.keys(): +# std_out(f'No readings key in request for sensor: {sensor_id} ({self.sensors[sensor_id]})', 'ERROR') +# flag_error = True +# continue + +# elif sensorjson['readings'] == []: +# std_out(f'No data in request for sensor: {sensor_id} ({self.sensors[sensor_id]})', 'WARNING') +# flag_error = True +# continue + +# if flag_error: continue + +# try: +# dfsensor = DataFrame(sensorjson['readings']).set_index(0) +# dfsensor.columns = [self.sensors[sensor_id]] +# dfsensor.index = localise_date(dfsensor.index, self.timezone) +# dfsensor.sort_index(inplace=True) +# dfsensor = dfsensor[~dfsensor.index.duplicated(keep='first')] + +# # Drop unnecessary columns +# dfsensor.drop([i for i in dfsensor.columns if 'Unnamed' in i], axis=1, inplace=True) +# # Check for weird things in the data +# dfsensor = dfsensor.astype(float, errors='ignore') +# # dfsensor = dfsensor.apply(to_numeric, errors='coerce') +# # Resample +# if (resample): +# dfsensor = dfsensor.resample(frequency).mean() +# df = df.combine_first(dfsensor) +# except: +# print_exc() +# std_out('Problem with sensor data from API', 'ERROR') +# flag_error = True +# pass +# continue + +# try: +# df = df.reindex(df.index.rename('TIME')) +# df = clean(df, clean_na, how = 'all') +# self.data = df + +# except: +# std_out('Problem closing up the API dataframe', 'ERROR') +# pass +# return None + +# if flag_error == False: std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS') +# return self.data + +# def post_device_data(self, clean_na = 'drop', chunk_size = 500): +# ''' +# POST self.data in the SmartCitizen API +# Parameters +# ---------- +# clean_na: string, optional +# 'drop' +# 'drop', 'fill' +# chunk_size: integer +# chunk size to split resulting pandas DataFrame for posting readings +# Returns +# ------- +# True if the data was posted succesfully +# ''' +# if self.data is None: +# std_out('No data to post, ignoring', 'ERROR') +# return False + +# if 'SC_BEARER' not in environ: +# std_out('Cannot post without Auth Bearer', 'ERROR') +# return False + +# if 'SC_ADMIN_BEARER' in environ: +# 
std_out('Using admin Bearer') +# bearer = environ['SC_ADMIN_BEARER'] +# else: +# bearer = environ['SC_BEARER'] + +# headers = {'Authorization':'Bearer ' + bearer, 'Content-type': 'application/json'} +# post_ok = True + +# for sensor_id in self.sensors: +# df = DataFrame(self.data[self.sensors[sensor]]).copy() +# post_ok &= self.post_data_to_device(df, clean_na = clean_na, chunk_size = chunk_size) + +# return post_ok + +# def post_data_to_device(self, df, clean_na = 'drop', chunk_size = 500, dry_run = False, max_retries = 2): +# ''' +# POST external pandas.DataFrame to the SmartCitizen API +# Parameters +# ---------- +# df: pandas DataFrame +# Contains data in a DataFrame format. +# Data is posted using the column names of the dataframe +# Data is posted in UTC TZ so dataframe needs to have located +# timestamp +# clean_na: string, optional +# 'drop' +# 'drop', 'fill' +# chunk_size: integer +# chunk size to split resulting pandas DataFrame for posting readings +# dry_run: boolean +# False +# Post the payload to the API or just return it +# max_retries: int +# 2 +# Maximum number of retries per chunk +# Returns +# ------- +# True if the data was posted succesfully +# ''' +# if 'SC_BEARER' not in environ: +# std_out('Cannot post without Auth Bearer', 'ERROR') +# return False + +# if 'SC_ADMIN_BEARER' in environ: +# std_out('Using admin Bearer') +# bearer = environ['SC_ADMIN_BEARER'] +# else: +# bearer = environ['SC_BEARER'] + +# headers = {'Authorization':'Bearer ' + bearer, 'Content-type': 'application/json'} + +# # Clean df of nans +# df = clean(df, clean_na, how = 'all') +# std_out(f'Posting columns to {self.API_BASE_URL}') +# std_out(f'{list(df.columns)}') +# df.index.name = 'recorded_at' + +# # Split the dataframe in chunks +# std_out(f'Splitting post in chunks of size {chunk_size}') +# chunked_dfs = [df[i:i+chunk_size] for i in range(0, df.shape[0], chunk_size)] + +# for i in trange(len(chunked_dfs), file=sys.stdout, +# desc=f"Posting data for {self.id}..."): + +# chunk = chunked_dfs[i].copy() + +# # Prepare json post +# payload = {"data":[]} +# for item in chunk.index: +# payload["data"].append( +# { +# "recorded_at": localise_date(item, 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ'), +# "sensors": [{ +# "id": column, +# "value": chunk.loc[item, column] +# } for column in chunk.columns if not isnan(chunk.loc[item, column])] +# } +# ) + +# if dry_run: +# std_out(f'Dry run request to: {self.API_BASE_URL}{self.id}/readings for chunk ({i+1}/{len(chunked_dfs)})') +# return dumps(payload, indent = 2, cls = NpEncoder) + +# post_ok = False +# retries = 0 + +# while post_ok == False and retries < max_retries: +# response = post(f'{self.API_BASE_URL}{self.id}/readings', +# data = dumps(payload, cls = NpEncoder), headers = headers) + +# if response.status_code == 200 or response.status_code == 201: +# post_ok = True +# break +# else: +# retries += 1 +# std_out (f'Chunk ({i+1}/{len(chunked_dfs)}) post failed. \ +# API responded {response.status_code}.\ +# Retrying ({retries}/{max_retries}', 'WARNING') + +# if (not post_ok) or (retries == max_retries): +# std_out (f'Chunk ({i+1}/{len(chunked_dfs)}) post failed. \ +# API responded {response.status_code}.\ +# Reached max_retries', 'ERROR') +# return False + +# return True + +# def patch_postprocessing(self, dry_run = False): +# ''' +# POST postprocessing info into the device in the SmartCitizen API +# Updates all the post info. 
Changes need to be made info the keys of the postprocessing outside of here + +# # Example postprocessing: +# # { +# # "blueprint_url": "https://github.com/fablabbcn/smartcitizen-data/blob/master/blueprints/sc_21_station_module.json", +# # "hardware_url": "https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/master/hardware/SCAS210001.json", +# # "latest_postprocessing": "2020-10-29T08:35:23Z" +# # } +# ''' + +# if 'SC_ADMIN_BEARER' not in environ: +# std_out('Cannot post without Admin Auth Bearer', 'ERROR') +# return + +# headers = {'Authorization':'Bearer ' + environ['SC_ADMIN_BEARER'], +# 'Content-type': 'application/json'} + +# post = {"postprocessing_attributes": self.postprocessing} +# post_json = dumps(post) + +# if dry_run: +# std_out(f'Dry run request to: {self.API_BASE_URL}{self.id}/') +# return dumps(post_json, indent = 2) + +# std_out(f'Posting postprocessing_attributes:\n {post_json}') +# response = patch(f'{self.API_BASE_URL}{self.id}/', +# data = post_json, headers = headers) + +# if response.status_code == 200 or response.status_code == 201: +# std_out(f"Postprocessing posted", "SUCCESS") +# return True +# else: +# std_out(f"API responded with {response.status_code}") + +# return False + +# class MuvApiDevice: + +# API_BASE_URL='https://data.waag.org/api/muv/' + +# def __init__ (self, did): +# self.id = did +# self.timezone = None +# self.data = None +# self.sensors = None + +# def get_device_timezone(self): +# self.timezone = 'Europe/Madrid' +# return self.timezone + +# def get_device_sensors(self): +# if self.sensors is None: +# self.sensors = dict() +# for key in config.blueprints: +# if 'muv' not in key: continue +# if 'sensors' in config.blueprints[key]: +# for sensor_name in config.blueprints[key]['sensors'].keys(): +# # IDs are unique +# self.sensors[config.blueprints[key]['sensors'][sensor_name]['id']] = sensor_name +# return self.sensors + +# def get_device_data(self, min_date = None, max_date = None, frequency = '3Min', clean_na = None, resample = True): + +# if min_date is not None: days_ago = (to_datetime(date.today())-to_datetime(min_date)).days +# else: days_ago = 365 # One year of data + +# std_out(f'Requesting data from MUV API') +# std_out(f'Device ID: {self.id}') +# self.get_device_timezone() +# self.get_device_sensors() + +# # Get devices +# try: +# if days_ago == -1: url = f'{self.API_BASE_URL}getSensorData?sensor_id={self.id}' +# else: url = f'{self.API_BASE_URL}getSensorData?sensor_id={self.id}&days={days_ago}' +# df = DataFrame(get(url).json()) +# except: +# print_exc() +# std_out('Failed sensor request request. 
Probably no connection', 'ERROR') +# pass +# return None + +# try: +# # Rename columns +# df.rename(columns = self.sensors, inplace = True) +# df = df.set_index('time') + +# df.index = localise_date(df.index, self.timezone) +# df = df[~df.index.duplicated(keep='first')] +# # Drop unnecessary columns +# df.drop([i for i in df.columns if 'Unnamed' in i], axis=1, inplace=True) +# df.drop('id', axis=1, inplace=True) +# # Check for weird things in the data +# df = df.apply(to_numeric, errors='coerce') +# # # Resample +# if (resample): +# df = df.resample(frequency).mean() +# df = df.reindex(df.index.rename('TIME')) + +# df = clean(df, clean_na, how = 'all') + +# self.data = df + +# except: +# print_exc() +# std_out('Problem closing up the API dataframe', 'ERROR') +# pass +# return None + +# std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS') +# return self.data + +# class DadesObertesApiDevice: + +# API_BASE_URL="https://analisi.transparenciacatalunya.cat/resource/tasf-thgu.json" + +# def __init__ (self, did = None, within = None): +# if did is None and within is None: +# std_out('Specify either station id (=codi_eoi) or within (=(lat, long, radius_meters))') +# return + +# if did is not None: self.id = did +# if within is not None: self.id = self.get_id_from_within(within) + +# self.timezone = None +# self.data = None +# self.sensors = None +# self.devicejson = None +# self.lat = None +# self.long = None +# self.alt = None +# self.timezone = None + +# @staticmethod +# def get_world_map(city = None, within = None, station_type = None, area_type = None, full = False): +# """ +# Gets devices from Dades Obertes API with certain requirements +# Parameters +# ---------- +# city: string, optional +# Empty string +# City +# within: tuple +# Empty tuple +# Gets the devices within a circle center on lat, long with a radius_meters +# within = tuple(lat, long, radius_meters) +# station_type: string +# None +# Type of station, to choose from: 'background', nan or 'traffic' +# area_type: string +# None +# Type of area, to choose from: nan, 'peri-urban', 'rural', 'suburban', 'urban' +# full: bool +# False +# Return full dataframe or not +# Returns +# ------- +# A list of eoi codes that comply with the requirements. If no requirements are set, returns all of them +# """ +# def is_within_circle(x, within): +# if isnan(x['latitud']): return False +# if isnan(x['longitud']): return False + +# return distance(location_A=(within[0], within[1]), location_B=(x['latitude'], x['longitude'])).m < within[2] + +# world_map = get("https://analisi.transparenciacatalunya.cat/resource/tasf-thgu.json") +# df = read_json(StringIO(world_map.content.decode('utf-8'))).set_index('codi_eoi') + +# # Location +# if city is not None: df=df[(df['municipi']==city)] +# if within is not None: + +# df['within'] = df.apply(lambda x: is_within_circle(x, within), axis=1) +# df=df[(df['within']==True)] + +# # Station type +# if station_type is not None: df=df[(df['tipus_estacio']==station_type)] +# # Area type +# if area_type is not None: df=df[(df['area_urbana']==area_type)] + +# if full: return df +# return list(set(list(df.index))) + +# def get_id_from_within(self, within): +# ''' +# Gets the stations within a radius in meters. 
+# within = tuple(lat, long, radius_meters) +# ''' +# request = self.API_BASE_URL +# request += f'$where=within_circle(geocoded_column,{within[0]},{within[1]},{within[2]})' + +# try: +# s = get(request) +# except: +# std_out('Problem with request from API', 'ERROR') +# return None + +# if s.status_code == 200 or s.status_code == 201: +# df = read_json(StringIO(s.content.decode('utf-8'))) +# else: +# std_out('API reported {}'.format(s.status_code), 'ERROR') +# return None + +# if 'codi_eoi' in df.columns: +# ids = list(set(df.codi_eoi.values)) +# if ids == []: std_out('No stations within range', 'ERROR') +# elif len(ids) > 1: +# for ptid in ids: +# municipi = next(iter(set(df[df.codi_eoi==ptid].municipi.values))) +# nom_estacio = next(iter(set(df[df.codi_eoi==ptid].nom_estacio.values))) +# area_urbana = next(iter(set(df[df.codi_eoi==ptid].area_urbana.values))) +# tipus_estacio = next(iter(set(df[df.codi_eoi==ptid].tipus_estacio.values))) + +# std_out(f'{ids.index(ptid)+1} /- {ptid} --- {municipi} - {nom_estacio} - Type: {area_urbana} - {tipus_estacio}') + +# wptid = int(input('Multiple stations found, please select one: ')) - 1 + +# devid = ids[wptid] +# std_out(f'Selected station in {next(iter(set(df[df.codi_eoi==devid].municipi.values)))} with codi_eoi={devid}') +# else: +# devid = ids[0] +# municipi = next(iter(set(df[df.codi_eoi==devid].municipi.values))) +# nom_estacio = next(iter(set(df[df.codi_eoi==devid].nom_estacio.values))) +# area_urbana = next(iter(set(df[df.codi_eoi==devid].area_urbana.values))) +# tipus_estacio = next(iter(set(df[df.codi_eoi==devid].tipus_estacio.values))) +# std_out(f'Found station in {next(iter(set(df[df.codi_eoi==devid].municipi.values)))} with codi_eoi={devid}') +# std_out(f'Found station in {municipi} - {nom_estacio} - {devid} - Type: {area_urbana} - {tipus_estacio}') + +# else: +# std_out('Data is empty', 'ERROR') +# return None + +# return devid + +# def get_device_sensors(self): + +# if self.sensors is None: +# if self.get_device_json() is not None: +# # Get available sensors +# sensors = list(set(self.devicejson.contaminant)) + +# # Put the ids and the names in lists +# self.sensors = dict() +# for sensor in sensors: +# for key in config.blueprints: +# if not search("csic_station",key): continue +# if 'sensors' in config.blueprints[key]: +# for sensor_name in config.blueprints[key]['sensors'].keys(): +# if config.blueprints[key]['sensors'][sensor_name]['id'] == str(sensor): +# # IDs are unique +# self.sensors[sensor] = sensor_name + +# return self.sensors + +# def get_device_json(self): + +# if self.devicejson is None: +# try: +# s = get(self.API_BASE_URL + f'/?codi_eoi={self.id}') +# if s.status_code == 200 or s.status_code == 201: +# self.devicejson = read_json(StringIO(s.content.decode('utf-8'))) +# else: +# std_out('API reported {}'.format(s.status_code), 'ERROR') +# except: +# std_out('Failed request. 
Probably no connection', 'ERROR') +# pass + +# return self.devicejson + +# def get_device_timezone(self): + +# if self.timezone is None: +# latitude, longitude = self.get_device_lat_long() +# # Localize it +# # self.timezone = tz_where.tzNameAt(latitude, longitude) +# self.timezone = tf.timezone_at(lng=longitude, lat=latitude) + +# std_out ('Device {} timezone is {}'.format(self.id, self.timezone)) + +# return self.timezone + +# def get_device_alt(self, update = False): + +# if self.lat is None or self.long is None: +# self.get_device_lat_long(update) + +# if self.alt is None or update: +# self.alt = get_elevation(_lat = self.lat, _long = self.long) + +# std_out ('Device {} altitude is {}m'.format(self.id, self.alt)) + +# return self.alt + +# def get_device_lat_long(self): + +# if self.lat is None or self.long is None: +# if self.get_device_json() is not None: +# latitude = longitude = None +# if 'latitud' in self.devicejson.columns: +# latitude = next(iter(set(self.devicejson.latitud))) +# longitude = next(iter(set(self.devicejson.longitud))) + +# self.lat = latitude +# self.long = longitude + +# std_out ('Device {} is located at {}, {}'.format(self.id, latitude, longitude)) + +# return (self.lat, self.long) + +# def get_device_data(self, min_date = None, max_date = None, frequency = '1H', clean_na = None, resample = True): +# ''' +# Based on code snippet from Marc Roig: +# # I2CAT RESEARCH CENTER - BARCELONA - MARC ROIG (marcroig@i2cat.net) +# ''' + +# std_out(f'Requesting data from Dades Obertes API') +# std_out(f'Device ID: {self.id}') +# self.get_device_sensors() +# self.get_device_timezone() + +# request = self.API_BASE_URL +# request += f'/?codi_eoi={self.id}' + +# if min_date is not None and max_date is not None: +# request += "&$where=data between " + to_datetime(min_date).strftime("'%Y-%m-%dT%H:%M:%S'") \ +# + " and " + to_datetime(max_date).strftime("'%Y-%m-%dT%H:%M:%S'") +# elif min_date is not None: +# request += "&$where=data >= " + to_datetime(min_date).strftime("'%Y-%m-%dT%H:%M:%S'") +# elif max_date is not None: +# request += "&$where=data < " + to_datetime(max_date).strftime("'%Y-%m-%dT%H:%M:%S'") + +# try: +# s = get(request) +# except: +# print_exc() +# std_out('Problem with sensor data from API', 'ERROR') +# pass +# return None + +# if s.status_code == 200 or s.status_code == 201: +# df = read_json(StringIO(s.content.decode('utf-8'))) +# else: +# std_out('API reported {}'.format(s.status_code), 'ERROR') +# pass +# return None + +# # Filter columns +# measures = ['h0' + str(i) for i in range(1,10)] +# measures += ['h' + str(i) for i in range(10,25)] +# # validations = ['v0' + str(i) for i in range(1,10)] +# # validations += ['v' + str(i) for i in range(10,25)] +# new_measures_names = list(range(1,25)) + +# columns = ['contaminant', 'data'] + measures# + validations +# try: +# df_subset = df[columns] +# df_subset.columns = ['contaminant', 'date'] + new_measures_names +# except: +# print_exc() +# std_out('Problem while filtering columns', 'Error') +# return None +# else: +# std_out('Successful filtering', 'SUCCESS') + +# # Pivot +# try: +# df = DataFrame([]) +# for contaminant in self.sensors.keys(): +# if contaminant not in df_subset['contaminant'].values: +# std_out(f'{contaminant} not in columns. 
Skipping', 'WARNING') +# continue +# df_temp= df_subset.loc[df_subset['contaminant']==contaminant].drop('contaminant', 1).set_index('date').unstack().reset_index() +# df_temp.columns = ['hours', 'date', contaminant] +# df_temp['date'] = to_datetime(df_temp['date']) +# timestamp_lambda = lambda x: x['date'] + DateOffset(hours=int(x['hours'])) +# df_temp['date'] = df_temp.apply( timestamp_lambda, axis=1) +# df_temp = df_temp.set_index('date') +# df[contaminant] = df_temp[contaminant] +# except: +# # print_exc() +# std_out('Problem while filtering columns', 'Error') +# pass +# return None +# else: +# std_out('Successful pivoting', 'SUCCESS') + +# df.index = to_datetime(df.index).tz_localize('UTC').tz_convert(self.timezone) +# df.sort_index(inplace=True) + +# # Rename +# try: +# df.rename(columns=self.sensors, inplace=True) +# except: +# # print_exc() +# std_out('Problem while renaming columns', 'Error') +# pass +# return None +# else: +# std_out('Successful renaming', 'SUCCESS') + +# # Clean +# df = df[~df.index.duplicated(keep='first')] +# # Drop unnecessary columns +# df.drop([i for i in df.columns if 'Unnamed' in i], axis=1, inplace=True) +# # Check for weird things in the data +# df = df.apply(to_numeric, errors='coerce') +# # Resample +# if (resample): +# df = df.resample(frequency).mean() + +# try: +# df = df.reindex(df.index.rename('TIME')) +# df = clean(df, clean_na, how = 'all') +# self.data = df +# except: +# std_out('Problem closing up the API dataframe', 'ERROR') +# pass +# return None + +# std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS') +# return self.data + +# class NiluApiDevice(object): +# """docstring for IflinkDevice""" +# API_BASE_URL='https://sensors.nilu.no/api/' +# API_CONNECTOR='sensors.nilu.no' + +# # Docs +# # https://sensors.nilu.no/api/doc#configure-sensor-schema +# # https://sensors.nilu.no/api/doc#push--sensor-data-by-id + +# def __init__ (self, did): + +# self.id = did +# self.timezone = None +# self.lat = None +# self.long = None +# self.alt = None +# self.data = None +# self.sensors = None +# self.devicejson = None +# self.last_reading_at = None +# self.added_at = None +# self._api_url = self.API_BASE_URL + f'sensors/{self.id}' + +# @property +# def api_url(self): +# return self._api_url + +# @staticmethod +# # def new_device(name, description = '', resolution = '1Min', epsg = config._epsg, enabled = True, location = None, sensors = None, dry_run = False): +# def new_device(name, location = {}, dry_run = False, **kwargs): + +# ''' +# Configures the device as a new sensor schema. +# This is a one-time configuration and shouldn't be necessary in a recursive way. +# More information at: https://sensors.nilu.no/api/doc#configure-sensor-schema + +# Parameters +# ---------- +# name: string +# Device name +# location: dict +# None +# sensor location. If sensor is moving (i.e. position is not fixed), +# then location must explicitly be set to an empty object: {} when configured. Also see this section. +# location = { +# 'longitude': longitude (double) – sensor east-west position, +# 'latitude': latitude (double) – sensor north-south position, +# 'altitude': altitude (double) – sensor height above sea level +# } +# dry_run: boolean +# False +# Post the payload to the API or just return it +# **kwargs +# ------ +# description: string, optional +# '' +# sensor description +# frequency: string, optional +# '1Min' +# pandas formatted frequency +# epsg: int, optional +# 4326 +# SRS EPSG code. Defaults to 4326 (WGS84). 
More info https://spatialreference.org/ +# enabled: boolean, optional +# True +# flag indicating if sensor is enabled for data transfer + +# sensors: dict() +# Dictionary containing necessary information of the sensors to be stored. scdata format: +# { +# 'SHORT_NAME': { +# 'desc': 'Channel description', +# 'id': 'sensor SC platform id', +# 'units': 'sensor_recording_units' +# }, +# ... +# } +# ------ + +# Returns +# ------- +# If dry_run, prints out a dict containing the payload and +# returns False +# If not, either False in case of error or a +# dictionary containing: +# sensorid (int) – sensor identifier +# message (string) – HTTP status text +# http-status-code (int) – HTTP status code +# atom (string) – atom URL to sensor +# ''' + +# API_BASE_URL='https://sensors.nilu.no/api/' +# API_CONNECTOR='nilu' + +# if API_CONNECTOR not in config.connectors: +# std_out(f'No connector for {API_CONNECTOR}', 'ERROR') +# return False + +# if 'NILU_BEARER' not in environ: +# std_out('Cannot configure without Auth Bearer', 'ERROR') +# return False + +# headers = {'Authorization':'Bearer ' + environ['NILU_BEARER'], 'Content-type': 'application/json'} + +# if name is None: +# std_out('Need a name to create a new sensor', 'ERROR') +# return False +# std_out (f'Configuring IFLINK device named {name}') + +# # Verify inputs +# flag_error = False + +# dft_input_params = ['epsg', 'description', 'frequency', 'enabled', 'sensors'] +# if any([x not in kwargs for x in dft_input_params]): +# std_out('Input params not ok for NiluApiDevice', 'ERROR') +# return False + +# # EPSG int type +# try: +# epsg = int(kwargs['epsg']) +# except: +# std_out('Could not convert epsg to int', 'ERROR') +# flag_error = True +# pass + +# # Resolution in seconds +# if not flag_error: +# try: +# resolution_seconds = to_timedelta(kwargs['frequency']).seconds +# except: +# std_out('Could not convert resolution to seconds', 'ERROR') +# flag_error = True +# pass + +# # Location +# if not flag_error: +# try: +# location['longitude'] +# location['latitude'] +# location['altitude'] +# except KeyError: +# std_out('Need latitude, longitude and altitude in location dict', 'ERROR') +# flag_error = True +# pass + +# if flag_error: return False + +# # Construct payload +# payload = { +# "name": name, +# "description": kwargs['description'], +# "resolution": resolution_seconds, +# "srs": { +# "epsg": epsg +# }, +# "enabled": kwargs['enabled'] +# } + +# payload['location'] = location + +# parameters = [] +# components = [] + +# # Construct +# sensors = kwargs['sensors'] +# for sensor in sensors.keys(): +# # Check if it's in the configured connectors +# _sid = str(sensors[sensor]['id']) + +# if _sid is None: +# std_out(f"Sensor {sensor} id is None. 
Ignoring", "WARNING") +# return False + +# if _sid not in config.connectors[API_CONNECTOR]['sensors']: +# if config._strict: +# std_out(f"Sensor {sensor} not found in connectors list", "ERROR") +# return False +# std_out(f"Sensor {sensor} not found in connectors list", "WARNING") +# continue + +# units = sensors[sensor]['units'] + +# _pjson = { +# "name": sensor, +# "type": "double", +# "doc": f"{sensors[sensor]['desc']} in {units}" +# } + +# _cjson = { +# "componentid": config.connectors[API_CONNECTOR]['sensors'][_sid]['id'], +# "unitid": config.connectors[API_CONNECTOR]['sensors'][_sid]['unitid'], +# "binding-path": f"/{sensor}", +# "level": config.connectors[API_CONNECTOR]['sensors'][_sid]['level'] +# } + +# parameters.append(_pjson) +# components.append(_cjson) +# # Add timestamp as long +# parameters.append({ +# 'name': 'date', +# 'type': 'long', +# 'doc': 'Date of measurement' +# }) + +# # Add the converter (we need to push as input-format) +# converters = [{ +# "input-type": "string", +# "output-type": "StringEpochTime", +# "target-path": "/date", +# "input-args": { +# "input-format": "yyyy-MM-ddTHH:mm:ssZ" +# } +# }] + +# mapping = [{ +# "name": "Timestamp", +# "target-path": "/date" +# }] + +# payload['parameters'] = parameters +# payload['components'] = components +# payload['converters'] = converters +# payload['mapping'] = mapping + +# if dry_run: +# std_out(f'Dry run request to: {API_BASE_URL}sensors/configure') +# print(dumps(payload, indent = 2)) +# return False + +# response = post(f'{API_BASE_URL}sensors/configure', +# data = dumps(payload), headers = headers) + + +# if response.status_code == 200 or response.status_code == 201: +# if 'sensorid' in response.json(): +# platform_id = str(response.json()['sensorid']) +# platform_url = "https://sensors.nilu.no/api/sensors/" + platform_id +# std_out(f'Device created with: \n{platform_url}', 'SUCCESS') +# return response.json() +# else: +# std_out('Response does not contain sensorid field') +# else: +# std_out(f'{API_BASE_URL} reported {response.status_code}:\n{response.json()}', 'ERROR') +# return False + +# def get_device_json(self, update = False): +# ''' +# https://sensors.nilu.no/api/doc#get--sensor-by-id +# ''' +# if 'NILU_BEARER' in environ: +# std_out('Auth Bearer found, using it', 'SUCCESS') +# headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']} +# else: +# std_out('Cannot request without bearer', 'ERROR') +# return None + +# if self.devicejson is None or update: +# try: +# deviceR = get(f'{self.API_BASE_URL}sensors/{self.id}') +# if deviceR.status_code == 429: +# std_out('API reported {}. Retrying once'.format(deviceR.status_code), +# 'WARNING') +# sleep(30) +# deviceR = get(f'{self.API_BASE_URL}sensors/{self.id}', headers = headers) + +# if deviceR.status_code == 200 or deviceR.status_code == 201: +# self.devicejson = deviceR.json() +# else: +# std_out('API reported {}'.format(deviceR.status_code), 'ERROR') +# except: +# std_out('Failed request. 
Probably no connection', 'ERROR') +# pass +# return self.devicejson + +# def get_device_description(self, update = False): +# if self.get_device_json(update) is not None: +# return self.get_device_json()['description'] +# return None + +# def get_device_lat_long(self, update = False): + +# if self.lat is None or self.long is None or update: +# if self.get_device_json(update) is not None: +# latidude = longitude = None +# if 'location' in self.devicejson.keys(): +# latitude, longitude = self.devicejson['location']['latitude'], self.devicejson['location']['longitude'] + +# self.lat = latitude +# self.long = longitude + +# std_out ('Device {} is located at {}, {}'.format(self.id, self.lat, self.long)) + +# return (self.lat, self.long) + +# def get_device_alt(self, update = False): + +# if self.lat is None or self.long is None: +# self.get_device_lat_long(update) + +# if self.alt is None or update: +# self.alt = get_elevation(_lat = self.lat, _long = self.long) + +# std_out ('Device {} altitude is {}m'.format(self.id, self.alt)) + +# return self.alt + +# def get_device_added_at(self, update = False): + +# if 'NILU_BEARER' in environ: +# std_out('Auth Bearer found, using it', 'SUCCESS') +# headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']} +# else: +# std_out('Cannot request without bearer', 'ERROR') +# return None + +# if self.added_at is None or update: +# try: +# response = get(f'{self.API_BASE_URL}data/id/{self.id}/minutc', headers = headers) +# if response.status_code == 429: +# std_out('API reported {}. Retrying once'.format(response.status_code), +# 'WARNING') +# sleep(30) +# response = get(f'{self.API_BASE_URL}data/id/{self.id}/minutc', headers = headers) + +# if response.status_code == 200 or response.status_code == 201: +# last_json = response.json() +# first_readings = [] +# for item in last_json: +# if 'timestamp_from_epoch' in item: first_readings.append(item['timestamp_from_epoch']) + +# self.added_at = localise_date(datetime.fromtimestamp(max(list(set(first_readings)))), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') +# else: +# std_out(f'API reported {response.status_code}: {response.json()}', 'ERROR') +# except: +# print_exc() +# std_out('Failed request. Probably no connection', 'ERROR') +# pass + +# std_out ('Device {} has last reading at {}'.format(self.id, self.added_at)) + +# return self.added_at + +# def get_device_last_reading(self, update = False): +# if 'NILU_BEARER' in environ: +# std_out('Auth Bearer found, using it', 'SUCCESS') +# headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']} +# else: +# std_out('Cannot request without bearer', 'ERROR') +# return None + +# if self.last_reading_at is None or update: +# try: +# response = get(f'{self.API_BASE_URL}data/id/{self.id}/maxutc', headers = headers) +# if response.status_code == 429: +# std_out('API reported {}. Retrying once'.format(response.status_code), +# 'WARNING') +# sleep(30) +# response = get(f'{self.API_BASE_URL}data/id/{self.id}/maxutc', headers = headers) + +# if response.status_code == 200 or response.status_code == 201: +# last_json = response.json() +# last_readings = [] +# for item in last_json: +# if 'timestamp_from_epoch' in item: last_readings.append(item['timestamp_from_epoch']) + +# self.last_reading_at = localise_date(datetime.fromtimestamp(max(list(set(last_readings)))), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') +# else: +# std_out(f'API reported {response.status_code}: {response.json()}', 'ERROR') +# except: +# print_exc() +# std_out('Failed request. 
Probably no connection', 'ERROR') +# pass + +# std_out ('Device {} has last reading at {}'.format(self.id, self.last_reading_at)) + +# return self.last_reading_at + +# def get_device_timezone(self, update = False): + +# if self.timezone is None or update: +# latitude, longitude = self.get_device_lat_long(update) +# # Localize it +# if latitude is not None and longitude is not None: +# # self.timezone = tz_where.tzNameAt(latitude, longitude, forceTZ=True) +# self.timezone = tf.timezone_at(lng=longitude, lat=latitude) + +# std_out ('Device {} timezone is {}'.format(self.id, self.timezone)) + +# return self.timezone + +# def get_device_sensors(self, update = False): + +# if self.sensors is None or update: +# if self.get_device_json(update) is not None: +# # Get available sensors +# sensors = self.devicejson['components'] +# # Put the ids and the names in lists +# self.sensors = dict() +# for sensor in sensors: +# self.sensors[sensor['id']] = sensor['binding-path'][1:] + +# return self.sensors + +# def get_device_data(self, min_date = None, max_date = None, frequency = '1Min', clean_na = None, resample = True): +# ''' +# From +# https://sensors.nilu.no/api/doc#get--data-from-utc-timestamp-by-id +# From-to +# https://sensors.nilu.no/api/doc#get--data-from-utc-timestamp-range-by-id +# ''' + +# if 'NILU_BEARER' in environ: +# std_out('Auth Bearer found, using it', 'SUCCESS') +# headers = {'Authorization':'Bearer ' + environ['NILU_BEARER']} +# else: +# std_out('Cannot request without bearer', 'ERROR') +# return None + +# std_out(f'Requesting data from {self.API_BASE_URL}') +# std_out(f'Device ID: {self.id}') + +# # Make sure we have the everything we need beforehand +# self.get_device_sensors() +# self.get_device_timezone() +# # This is not available yet +# # self.get_device_added_at() +# self.get_device_last_reading() + +# if self.timezone is None: +# std_out('Device does not have timezone set, skipping', 'WARNING') +# return None + +# # Check start date and end date +# if min_date is not None: +# min_date = localise_date(to_datetime(min_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') +# std_out (f'Min Date: {min_date}') +# else: +# std_out(f"No min_date specified, requesting all", 'WARNING') +# # min_date = localise_date(to_datetime(self.added_at), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') + +# if max_date is not None: +# max_date = localise_date(to_datetime(max_date), 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') +# std_out (f'Max Date: {max_date}') +# else: +# std_out(f"No max_date specified") + +# # Trim based on actual data available +# if min_date is not None and self.last_reading_at is not None: +# if min_date > self.last_reading_at: +# std_out(f'Device request would yield empty data (min_date). Returning', 'WARNING') +# return None + +# if max_date is not None and self.added_at is not None: +# if max_date < self.added_at: +# std_out(f'Device request would yield empty data (max_date). 
Returning', 'WARNING') +# return None + +# if max_date is not None and self.last_reading_at is not None: +# if max_date > self.last_reading_at: +# std_out('Trimming max_date to last reading', 'WARNING') +# max_date = self.last_reading_at + +# # Print stuff +# std_out(f'Device timezone: {self.timezone}') +# if not self.sensors.keys(): +# std_out(f'Device is empty') +# return None +# else: std_out(f'Sensor IDs: {list(self.sensors.keys())}') + +# df = DataFrame() + +# # Request sensor per ID +# request = f'{self.API_BASE_URL}data/id/{self.id}/' + +# if min_date is not None: request += f'fromutc/{min_date}/' +# if max_date is not None: request += f'toutc/{max_date}' + +# # Make request +# response = get(request, headers = headers) + +# # Retry once in case of 429 after 30s +# if response.status_code == 429: +# std_out('Too many requests, waiting for 1 more retry', 'WARNING') +# sleep (30) +# response = get(request, headers = headers) + +# df = DataFrame(response.json()).pivot(index='timestamp_from_epoch', columns='component', values='value') +# df.columns.name = None +# df.index = localise_date(to_datetime(df.index, unit='s'), self.timezone) +# df = df.reindex(df.index.rename('TIME')) + +# # Drop unnecessary columns +# df.drop([i for i in df.columns if 'Unnamed' in i], axis=1, inplace=True) +# # Check for weird things in the data +# df = df.apply(to_numeric, errors='coerce') +# # Resample +# if (resample): +# df = df.resample(frequency).mean() +# df = clean(df, clean_na, how = 'all') + +# # Rename columns +# d = {} +# for component in self.devicejson['components']: +# if 'name' in component: d[component['name']]=self.sensors[component['id']] +# df = df.rename(columns=d) + +# self.data = df + +# std_out(f'Device {self.id} loaded successfully from API', 'SUCCESS') +# return self.data + +# def post_data_to_device(self, df, clean_na = 'drop', chunk_size = None, dry_run = False, max_retries = 2): +# ''' +# POST external data in the IFLINK API, following +# https://sensors.nilu.no/api/doc#push--sensor-data-by-id +# Parameters +# ---------- +# df: pandas DataFrame +# Contains data in a DataFrame format. +# Data is posted using the column name of the dataframe +# Data is posted in UTC TZ so dataframe needs to have located +# timestamp +# clean_na: string, optional +# 'drop' +# 'drop', 'fill' +# chunk_size: None (not used?) 
+# chunk size to split resulting pandas DataFrame for posting readings +# dry_run: boolean +# False +# Post the payload to the API or just return it +# max_retries: int +# 2 +# Maximum number of retries per chunk +# Returns +# ------- +# True if the data was posted succesfully +# ''' + +# if 'NILU_BEARER' not in environ: +# std_out('Cannot post without Auth Bearer', 'ERROR') +# return False + +# headers = {'Authorization':'Bearer ' + environ['NILU_BEARER'], +# 'Content-type': 'application/json'} + +# # Clean df of nans +# df = clean(df, clean_na, how = 'all') + +# std_out(f'Posting columns to {self.API_BASE_URL}.') +# std_out(f'Rest in schema are empty: {list(df.columns)}') + +# # Fill with declared schema to avoid rejection by the API +# self.get_device_sensors() +# for sensor in self.sensors: +# if self.sensors[sensor] not in df.columns: +# df[self.sensors[sensor]] = nan + +# # Split the dataframe in chunks +# std_out(f'Splitting post in chunks of size {chunk_size}') + +# for i in trange(len(df.index), file=sys.stdout, +# desc=f"Posting data for {self.id}..."): + +# row = DataFrame(df.loc[df.index[i],:]).T +# # Prepare json post +# payload = {} +# payload['date'] = localise_date(df.index[i], 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ') + +# for column in row.columns: +# payload[column] = row.loc[df.index[i], column] + +# if dry_run: +# std_out(f'Dry run request to: {self.API_BASE_URL}sensors/{self.id}/inbound') +# return dumps(payload, indent = 2, cls = NpEncoder) + +# post_ok = False +# retries = 0 + +# while post_ok == False and retries < max_retries: + +# response = post(f'{self.API_BASE_URL}sensors/{self.id}/inbound', +# data = dumps(payload, cls = NpEncoder), headers = headers) + +# if response.status_code == 200 or response.status_code == 201: +# post_ok = True +# break +# else: +# retries += 1 +# std_out (f'Chunk ({i+1}/{len(df.index)}) post failed. \ +# API responded {response.status_code}.\ +# Retrying ({retries}/{max_retries}', 'WARNING') + +# if (not post_ok) or (retries == max_retries): +# std_out (f'Chunk ({i+1}/{len(df.index)}) post failed. 
\ +# API responded {response.status_code}.\ +# Reached max_retries', 'ERROR') +# return False + +# return True From fa290ead537df810d8155344fa2a4608e9a24063 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:03:49 +0200 Subject: [PATCH 33/72] Rename file api --- scdata/io/{csv.py => device_file.py} | 52 ++++++++++++++++++---------- 1 file changed, 34 insertions(+), 18 deletions(-) rename scdata/io/{csv.py => device_file.py} (84%) diff --git a/scdata/io/csv.py b/scdata/io/device_file.py similarity index 84% rename from scdata/io/csv.py rename to scdata/io/device_file.py index 5d0a9600..db2b9180 100644 --- a/scdata/io/csv.py +++ b/scdata/io/device_file.py @@ -1,11 +1,29 @@ from os import makedirs, listdir from os.path import exists, join, splitext -from scdata.utils import std_out, localise_date, clean +from scdata.utils import logger, localise_date, clean from pandas import read_csv, to_datetime, DataFrame from scdata._config import config import csv +# from scdata.models import CSVParams, CSVFiles +from pydantic import BaseModel, ConfigDict +# class CSVHandler(BaseModel): +# ''' Main implementation of the device class ''' +# model_config = ConfigDict(arbitrary_types_allowed = True) + +# params: CSVParams = CSVParams() +# files: CSVFiles = CSVFiles() +# method: 'sync' + +# # TODO - Fix +# def export(self): +# return True + +# # TODO - Fix +# def get_data(self): +# return True + def export_csv_file(path, file_name, df, forced_overwrite=False): ''' Exports pandas dataframe to a csv file @@ -32,20 +50,18 @@ def export_csv_file(path, file_name, df, forced_overwrite=False): # If file does not exist if not exists(path + '/' + str(file_name) + '.csv') or forced_overwrite: df.to_csv(path + '/' + str(file_name) + '.csv', sep=",") - std_out('File saved to: \n' + path + '/' + str(file_name) + '.csv', 'SUCCESS') + logger.info('File saved to: \n' + path + '/' + str(file_name) + '.csv') else: - std_out("File Already exists - delete it first, I was not asked to overwrite anything!", 'ERROR') + logger.error("File Already exists - delete it first, I was not asked to overwrite anything!") return False - return True - -def read_csv_file(file_path, timezone, frequency=None, clean_na=None, index_name='', skiprows=None, sep=',', encoding='utf-8', tzaware=True, resample=True): +def read_csv_file(path, timezone, frequency=None, clean_na=None, index_name='', skiprows=None, sep=',', encoding='utf-8', tzaware=True, resample=True): """ Reads a csv file and adds cleaning, localisation and resampling and puts it into a pandas dataframe Parameters ---------- - file_path: String + path: String File path for csv file timezone: String Time zone for the csv file @@ -74,8 +90,8 @@ def read_csv_file(file_path, timezone, frequency=None, clean_na=None, index_name # Read pandas dataframe - df = read_csv(file_path, verbose=False, skiprows=skiprows, sep=sep, - encoding=encoding, encoding_errors='ignore') + df = read_csv(path, verbose=False, skiprows=skiprows, sep=sep, + encoding=encoding, encoding_errors='ignore') flag_found = False if type(index_name) == str: @@ -89,7 +105,7 @@ def read_csv_file(file_path, timezone, frequency=None, clean_na=None, index_name # Composite index (for instance, DATE and TIME in different columns) for iname in index_name: if iname not in df.columns: - std_out(f'{iname} not found in columns', 'ERROR') + logger.error(f'{iname} not found in columns') return None joint_index_name = '_'.join(index_name) df[joint_index_name] = df[index_name].agg(' '.join, axis=1) @@ -98,7 +114,7 @@ def 
read_csv_file(file_path, timezone, frequency=None, clean_na=None, index_name flag_found = True if not flag_found: - std_out('Index not found. Cannot reindex', 'ERROR') + logger.error('Index not found. Cannot reindex') return None # Set index @@ -118,7 +134,7 @@ def read_csv_file(file_path, timezone, frequency=None, clean_na=None, index_name # Resample if (resample): - std_out ('Resampling', 'INFO') + logger.info ('Resampling', 'INFO') df = df.resample(frequency).mean() # Remove na @@ -158,7 +174,7 @@ def sdcard_concat(path, output = 'CONCAT.CSV', index_name = 'TIME', keep = True, for file in listdir(path): if file != output and file not in ignore: - std_out(f'Loading file: {file}') + logger.info(f'Loading file: {file}') filename, _ = splitext(file) src_path = join(path, file) @@ -167,7 +183,7 @@ def sdcard_concat(path, output = 'CONCAT.CSV', index_name = 'TIME', keep = True, header = csv_file.readlines()[0:4] except: ignore_file = True - std_out(f'Ignoring file: {file}', 'WARNING') + logger.warning(f'Ignoring file: {file}') pass else: ignore_file = False @@ -209,14 +225,14 @@ def sdcard_concat(path, output = 'CONCAT.CSV', index_name = 'TIME', keep = True, if 'blueprint' in kwargs: rename_bp = kwargs['blueprint'] if rename_bp not in config.blueprints: - std_out('Blueprint not in config. Cannot rename', 'WARNING') + logger.warning('Blueprint not in config. Cannot rename') rename = False else: - std_out('No blueprint specified', 'INFO') + logger.info('No blueprint specified') rename = False if rename: - std_out('Keep in mind that renaming doesnt change the units', 'WARNING') + logger.warning('Keep in mind that renaming doesnt change the units') rename_d = dict() for old_key in header_tokenized: for key, value in config.blueprints[rename_bp]['sensors'].items(): @@ -225,7 +241,7 @@ def sdcard_concat(path, output = 'CONCAT.CSV', index_name = 'TIME', keep = True, break for old_key in rename_d: - std_out(f'Renaming {old_key} to {rename_d[old_key]}') + logger.info(f'Renaming {old_key} to {rename_d[old_key]}') header_tokenized[rename_d[old_key]] = header_tokenized.pop(old_key) concat.rename(columns=rename_d, inplace=True) From 1f91c6f59d907381034d09edf90f47d81074f990 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:04:22 +0200 Subject: [PATCH 34/72] Air and water blueprints --- blueprints/sc_air_api.json | 4 ++-- blueprints/sc_water_api.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/blueprints/sc_air_api.json b/blueprints/sc_air_api.json index f5fede90..5a0c6d58 100644 --- a/blueprints/sc_air_api.json +++ b/blueprints/sc_air_api.json @@ -258,7 +258,7 @@ ], "source":{ "type": "api", - "handler": "SCDevice", - "module": "smartcitizen_connector" + "module": "smartcitizen_connector", + "handler": "SCDevice" } } diff --git a/blueprints/sc_water_api.json b/blueprints/sc_water_api.json index eb6490ce..08c2cec2 100644 --- a/blueprints/sc_water_api.json +++ b/blueprints/sc_water_api.json @@ -5,7 +5,7 @@ "metrics": [], "source":{ "type": "api", - "handler": "SCDevice", - "module": "smartcitizen_connector" + "module": "smartcitizen_connector", + "handler": "SCDevice" } } From eeb800b1cac65a22a5d5581930b1cd42c57d1352 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:04:44 +0200 Subject: [PATCH 35/72] Models in init --- scdata/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scdata/__init__.py b/scdata/__init__.py index 47695582..81b53c58 100644 --- a/scdata/__init__.py +++ b/scdata/__init__.py @@ -1,4 +1,5 @@ -from 
.test import Test from .device import Device +from .test import Test +from .models import TestOptions, DeviceOptions, APIParams, FileParams __version__ = '0.9.1' From 552e4a67a00499f44343fe52201ab71667af72eb Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:05:39 +0200 Subject: [PATCH 36/72] Missing imports for device file --- scdata/device/process/__init__.py | 2 +- scdata/io/__init__.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/scdata/device/process/__init__.py b/scdata/device/process/__init__.py index c32342d4..9f6d6b99 100644 --- a/scdata/device/process/__init__.py +++ b/scdata/device/process/__init__.py @@ -6,4 +6,4 @@ from .timeseries import clean_ts, merge_ts, rolling_avg, poly_ts, geo_located, time_derivative, delta_index_ts from .baseline import find_min_max, baseline_calc, get_delta_baseline, get_als_baseline from .alphasense import alphasense_803_04, alphasense_pt1000, channel_names, basic_4electrode_alg, baseline_4electrode_alg, deconvolution, ec_sensor_temp -from .regression import apply_regressor +from .regression import apply_regressor \ No newline at end of file diff --git a/scdata/io/__init__.py b/scdata/io/__init__.py index 3f6b4146..4be3abb8 100644 --- a/scdata/io/__init__.py +++ b/scdata/io/__init__.py @@ -1,3 +1,4 @@ -from .csv import read_csv_file, export_csv_file, sdcard_concat -from .firmware import get_firmware_names +from .device_file import read_csv_file, export_csv_file, sdcard_concat#, CSVHandler +# TODO - Decide what to use with other handlers from API +# from .device_api import * from .model import model_load, model_export \ No newline at end of file From afdc2cde5a970d111b85f0174773a1b6e1c4aefa Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:05:59 +0200 Subject: [PATCH 37/72] Convert metrics and blueprints to models --- scdata/utils/meta.py | 56 +++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/scdata/utils/meta.py b/scdata/utils/meta.py index b3137c53..4e497827 100644 --- a/scdata/utils/meta.py +++ b/scdata/utils/meta.py @@ -8,6 +8,10 @@ from traceback import print_exc import json from re import sub +from pydantic import TypeAdapter +from typing import List +from scdata.utils.headers import process_headers +from scdata.models import Name, Blueprint, Metric def get_paths(): @@ -119,7 +123,6 @@ def load_env(env_file): return True def load_blueprints(urls): - blueprints = dict() for url in urls: if url is None: continue @@ -127,7 +130,7 @@ def load_blueprints(urls): _blueprint = get_json_from_url(url) if _nblueprint not in blueprints: - blueprints[_nblueprint] = _blueprint + blueprints[_nblueprint] = TypeAdapter(Blueprint).validate_python(_blueprint).dict() return blueprints @@ -209,35 +212,24 @@ def load_connectors(urls): return connectors def load_names(urls): - ''' - Loads names from urls. Names have to be unique in each - { - "SCD30_CO2": - { - "id": "158", - "title": "SCD30 CO2", - "unit": "ppm" - }, - ... 
- } - Parameters - ---------- - urls: [String] - json file urls - Returns - --------- - Dictionary containing names, otherwise None - ''' - + isn = True names = dict() - for url in urls: - try: - c = get_json_from_url(url) - _nc = basename(urlparse(str(url)).path).split('.')[0] - names[_nc] = c - except: - print(f'Problem loading names from {url}') - print_exc() - return None - return names \ No newline at end of file + for url in urls: + result = list() + _nc = basename(urlparse(str(url)).path).split('.')[0] + while isn: + r = get(url) + r.raise_for_status() + # If status code OK, retrieve data + h = process_headers(r.headers) + result += TypeAdapter(List[Name]).validate_python(r.json()) + + if 'next' in h: + if h['next'] == url: isn = False + elif h['next'] != url: url = h['next'] + else: + isn = False + names[_nc] = result + + return names From 7b79952dfebb61b6f6a490e184d857e62ba69d9e Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:06:23 +0200 Subject: [PATCH 38/72] WIP config update --- scdata/_config/config.py | 100 ++++++++++++++------------------------- 1 file changed, 35 insertions(+), 65 deletions(-) diff --git a/scdata/_config/config.py b/scdata/_config/config.py index 1bc53995..3d1f374c 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -41,21 +41,10 @@ class Config(object): # Timeout for http requests _timeout = 3 + _max_http_retries = 2 - ### --------------------------------------- - ### ----------------CRONTAB---------------- - ### --------------------------------------- - # Tabfile for cronjobs - _tabfile = 'tabfile' - - # Scheduler - _scheduler_interval_days = 1 - _device_scheduler = 'dschedule' - _scheduler_log = 'scheduler.log' - # Tasks - _postprocessing_interval_hours = 1 - _device_processor = 'dprocess' - _max_forward_retries = 2 + # Max concurrent requests + _max_concurrent_requests = 30 ### --------------------------------------- ### -----------------DATA------------------ @@ -114,7 +103,7 @@ class Config(object): ### -------------SMART CITIZEN------------- ### --------------------------------------- # # Urls - _base_postprocessing_url = 'https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/master/' + _base_postprocessing_url = 'https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/' _default_file_type = 'json' calibrations_urls = [ @@ -122,23 +111,24 @@ class Config(object): ] blueprints_urls = [ - f'{_base_postprocessing_url}blueprints/base.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/csic_station.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/muv_station.{_default_file_type}', - # f'{_base_postprocessing_url}blueprints/parrot_soil.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sc_20_station_iscape.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sc_21_station_iscape.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/base.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/csic_station.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/muv_station.{_default_file_type}', + # # f'{_base_postprocessing_url}blueprints/parrot_soil.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sc_20_station_iscape.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sc_21_station_iscape.{_default_file_type}', f'{_base_postprocessing_url}blueprints/sc_21_station_module.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck.{_default_file_type}', - 
f'{_base_postprocessing_url}blueprints/sck_15.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_20.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_21.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_21_sps30.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_21_sen5x.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_21_gps.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_21_nilu.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sck_21_co2.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sc_21_water.{_default_file_type}' + # f'{_base_postprocessing_url}blueprints/sck.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_15.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_20.{_default_file_type}', + f'{_base_postprocessing_url}blueprints/sc_air_api.{_default_file_type}', + f'{_base_postprocessing_url}blueprints/sc_air_csv.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_21_sps30.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_21_sen5x.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_21_gps.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_21_nilu.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sck_21_co2.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sc_21_water.{_default_file_type}' ] connectors_urls = [ @@ -146,7 +136,8 @@ class Config(object): ] names_urls = [ - f'{_base_postprocessing_url}names/sc_sensor_names.{_default_file_type}' + # f'{_base_postprocessing_url}names/SCDevice.json' + 'https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/names/SCDevice.json' ] # Convertion table from API SC to Pandas @@ -163,7 +154,6 @@ class Config(object): ['ms','ms'] ) - ### --------------------------------------- ### -------------METRICS DATA-------------- ### --------------------------------------- @@ -242,6 +232,8 @@ class Config(object): # It accepts reverse operations - you don't need to put them twice but in reverse _unit_convertion_lut = ( + ['%rh', '%', 1, False], + ['ºC', 'degC', 1, False], ['ppm', 'ppb', 1000, False], ['mg/m3', 'ug/m3', 1000, False], ['mgm3', 'ugm3', 1000, False], @@ -550,7 +542,8 @@ class Config(object): 'description', 'latest_postprocessing', 'blueprint_loaded_from_url', - 'hardware_loaded_from_url' + 'hardware_loaded_from_url', + 'data' ] _csv_defaults = { @@ -613,12 +606,11 @@ def get_meta_data(self): namespath = join(self.paths['interim'], 'names.json') if self.data['reload_metadata'] or not exists(namespath): names = load_names(self.names_urls) - # sc_sensor_names = load_firmware_names(self.sensor_names_url_21) - # sc_sensor_names = load_api_names(self.sensors_api_names_url) namesreload = True - else: - with open(namespath, 'r') as file: names = json.load(file) - namesreload = False + # else: + # TODO Implement load of serialised model + # with open(namespath, 'r') as file: names = json.load(file) + # namesreload = False if blueprints is not None: self.blueprints = blueprints @@ -634,16 +626,15 @@ def get_meta_data(self): with open(conpath, 'w') as file: json.dump(connectors, file) if names is not None: self.names = names - if namesreload: - with open(namespath, 'w') as file: json.dump(names, file) + # if namesreload: + # TODO Implement dump of serialised model + # with open(namespath, 'w') as file: json.dump(names, file) # Find 
environment file in root or in scdata/ for clones if exists(join(self.paths['data'],'.env')): env_file = join(self.paths['data'],'.env') else: - print (f'No environment file found. \ - If you had an environment file (.env) before, \ - make sure its now here:') + print(f'No environment file found. If you had an environment file (.env) before, make sure its now here') print(join(self.paths['data'],'.env')) env_file = None @@ -682,24 +673,3 @@ def save(self): with open(_sccpath, "w") as cf: yaml.dump(c, cf) - def set_testing(self, env_file = None): - ''' - Convenience method for setting variables as development - in jupyterlab - Parameters - ---------- - None - Returns - ---------- - None - ''' - - print ('Setting test mode') - self._out_level = 'DEBUG' - self.framework = 'jupyterlab' - self._intermediate_plots = True - self._plot_out_level = 'DEBUG' - - # Load Environment - if env_file is not None and not self._env_file: - if load_env(env_file): self._env_file = True From 702ca9ea0c1b1f579f14c9b4596ebbb2c24dbc66 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 19:24:49 +0200 Subject: [PATCH 39/72] Rename utils to tools --- scdata/_config/config.py | 4 ++-- scdata/device/device.py | 6 +++--- scdata/device/process/__init__.py | 2 +- scdata/device/process/alphasense.py | 2 +- scdata/device/process/baseline.py | 2 +- scdata/device/process/regression.py | 2 +- scdata/device/process/timeseries.py | 2 +- scdata/io/device_api.py | 2 +- scdata/io/device_file.py | 2 +- scdata/io/model.py | 2 +- scdata/test/checks/checks.py | 4 ++-- scdata/test/dispersion/dispersion.py | 2 +- scdata/test/export/to_file.py | 2 +- scdata/test/plot/box_plot.py | 2 +- scdata/test/plot/heatmap_iplot.py | 2 +- scdata/test/plot/heatmap_plot.py | 2 +- scdata/test/plot/maps.py | 2 +- scdata/test/plot/plot_tools.py | 2 +- scdata/test/plot/scatter_dispersion_grid.py | 2 +- scdata/test/plot/scatter_iplot.py | 2 +- scdata/test/plot/scatter_plot.py | 2 +- scdata/test/plot/ts_dendrogram.py | 2 +- scdata/test/plot/ts_dispersion_grid.py | 2 +- scdata/test/plot/ts_dispersion_plot.py | 2 +- scdata/test/plot/ts_dispersion_uplot.py | 2 +- scdata/test/plot/ts_iplot.py | 2 +- scdata/test/plot/ts_plot.py | 2 +- scdata/test/plot/ts_scatter.py | 2 +- scdata/test/plot/ts_uplot.py | 2 +- scdata/test/test.py | 2 +- scdata/test/tools/combine.py | 2 +- scdata/test/tools/prepare.py | 2 +- scdata/{utils => tools}/__init__.py | 2 +- scdata/{utils => tools}/cleaning.py | 0 scdata/{utils => tools}/date.py | 0 scdata/{utils => tools}/dictmerge.py | 0 scdata/{utils => tools}/find.py | 2 +- scdata/{utils => tools}/headers.py | 0 scdata/{utils => tools}/interim/example.csv | 0 scdata/{utils => tools}/interim/geodata.csv | 0 scdata/{utils => tools}/lazy.py | 0 scdata/{utils => tools}/location.py | 2 +- scdata/{utils => tools}/meta.py | 2 +- scdata/{utils => tools}/other/clean_python.py | 0 scdata/{utils => tools}/other/concat_script.py | 0 scdata/{utils => tools}/other/csv_to_geojson.py | 0 scdata/{utils => tools}/other/csv_to_gpx.py | 0 scdata/{utils => tools}/other/data_xlsx.py | 0 .../other/get_monitoring_stations_muv.py | 0 .../other/get_reference_data_csic.py | 0 scdata/{utils => tools}/other/manage_post_info.py | 0 scdata/{utils => tools}/other/rename_yaml.ipynb | 0 scdata/{utils => tools}/other/xls_dublin.py | 0 scdata/{utils => tools}/out.py | 0 scdata/{utils => tools}/report.py | 0 scdata/{utils => tools}/stats.py | 0 scdata/{utils => tools}/units.py | 2 +- .../{utils => tools}/uploads/example_upload_1.json | 0 
.../uploads/example_zenodo_upload.yaml | 0 scdata/{utils => tools}/uploads/report.pdf | Bin scdata/{utils => tools}/url_check.py | 0 scdata/{utils => tools}/zenodo.py | 4 ++-- scdata/{utils => tools}/zenodo_templates/README.md | 0 .../zenodo_templates/template_zenodo_dataset.json | 0 .../template_zenodo_publication.json | 0 65 files changed, 43 insertions(+), 43 deletions(-) rename scdata/{utils => tools}/__init__.py (93%) rename scdata/{utils => tools}/cleaning.py (100%) rename scdata/{utils => tools}/date.py (100%) rename scdata/{utils => tools}/dictmerge.py (100%) rename scdata/{utils => tools}/find.py (90%) rename scdata/{utils => tools}/headers.py (100%) rename scdata/{utils => tools}/interim/example.csv (100%) rename scdata/{utils => tools}/interim/geodata.csv (100%) rename scdata/{utils => tools}/lazy.py (100%) rename scdata/{utils => tools}/location.py (97%) rename scdata/{utils => tools}/meta.py (99%) rename scdata/{utils => tools}/other/clean_python.py (100%) rename scdata/{utils => tools}/other/concat_script.py (100%) rename scdata/{utils => tools}/other/csv_to_geojson.py (100%) rename scdata/{utils => tools}/other/csv_to_gpx.py (100%) rename scdata/{utils => tools}/other/data_xlsx.py (100%) rename scdata/{utils => tools}/other/get_monitoring_stations_muv.py (100%) rename scdata/{utils => tools}/other/get_reference_data_csic.py (100%) rename scdata/{utils => tools}/other/manage_post_info.py (100%) rename scdata/{utils => tools}/other/rename_yaml.ipynb (100%) rename scdata/{utils => tools}/other/xls_dublin.py (100%) rename scdata/{utils => tools}/out.py (100%) rename scdata/{utils => tools}/report.py (100%) rename scdata/{utils => tools}/stats.py (100%) rename scdata/{utils => tools}/units.py (98%) rename scdata/{utils => tools}/uploads/example_upload_1.json (100%) rename scdata/{utils => tools}/uploads/example_zenodo_upload.yaml (100%) rename scdata/{utils => tools}/uploads/report.pdf (100%) rename scdata/{utils => tools}/url_check.py (100%) rename scdata/{utils => tools}/zenodo.py (99%) rename scdata/{utils => tools}/zenodo_templates/README.md (100%) rename scdata/{utils => tools}/zenodo_templates/template_zenodo_dataset.json (100%) rename scdata/{utils => tools}/zenodo_templates/template_zenodo_publication.json (100%) diff --git a/scdata/_config/config.py b/scdata/_config/config.py index 3d1f374c..50618529 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -1,8 +1,8 @@ import yaml import json -from scdata.utils.dictmerge import dict_fmerge -from scdata.utils.meta import (get_paths, load_blueprints, +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.meta import (get_paths, load_blueprints, load_calibrations, load_connectors, load_env, load_names) diff --git a/scdata/device/device.py b/scdata/device/device.py index 8e6ff4cb..3b0b6bad 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -1,9 +1,9 @@ ''' Main implementation of class Device ''' -from scdata.utils import logger, localise_date, \ +from scdata.tools import logger, localise_date, \ dict_fmerge, get_units_convf from scdata.io import read_csv_file, export_csv_file -from scdata.utils import LazyCallable, url_checker, \ +from scdata.tools import LazyCallable, url_checker, \ get_json_from_url, find_by_field from scdata._config import config from scdata.io.device_api import * @@ -53,7 +53,7 @@ def model_post_init(self, __context) -> None: Default: 'sck_21' Defines the type of device. 
For instance: sck_21, sck_20, csic_station, muv_station parrot_soil, sc_20_station, sc_21_station... A list of all the blueprints is found in - config.blueprints_urls and accessible via the scdata.utils.load_blueprints(urls) function. + config.blueprints_urls and accessible via the scdata.tools.load_blueprints(urls) function. The blueprint can also be defined from the postprocessing info in SCAPI. The manual parameter passed here overrides that of the API. diff --git a/scdata/device/process/__init__.py b/scdata/device/process/__init__.py index 9f6d6b99..8fcd9ac4 100644 --- a/scdata/device/process/__init__.py +++ b/scdata/device/process/__init__.py @@ -1,6 +1,6 @@ ''' Implementation of different processes to be done in each device ''' -from scdata.utils import LazyCallable +from scdata.tools import LazyCallable from .formulae import absolute_humidity, exp_f, fit_exp_f from .geoseries import is_within_circle from .timeseries import clean_ts, merge_ts, rolling_avg, poly_ts, geo_located, time_derivative, delta_index_ts diff --git a/scdata/device/process/alphasense.py b/scdata/device/process/alphasense.py index 2f0ec7f5..317bf184 100644 --- a/scdata/device/process/alphasense.py +++ b/scdata/device/process/alphasense.py @@ -1,4 +1,4 @@ -from scdata.utils import logger, get_units_convf, find_dates, localise_date +from scdata.tools import logger, get_units_convf, find_dates, localise_date from scdata._config import config from scdata.device.process.params import * from scdata.device.process import baseline_calc, clean_ts diff --git a/scdata/device/process/baseline.py b/scdata/device/process/baseline.py index 4e5d1e32..50d2292b 100644 --- a/scdata/device/process/baseline.py +++ b/scdata/device/process/baseline.py @@ -7,7 +7,7 @@ from numpy import max as npmax from numpy import abs as npabs from numpy import argmax, argmin, arange, exp -from scdata.utils import logger +from scdata.tools import logger from scdata._config import config from math import isnan from .formulae import exp_f diff --git a/scdata/device/process/regression.py b/scdata/device/process/regression.py index 1eb27c7c..0334c135 100644 --- a/scdata/device/process/regression.py +++ b/scdata/device/process/regression.py @@ -1,5 +1,5 @@ from scdata._config import config -from scdata.utils import logger, dict_fmerge, clean +from scdata.tools import logger, dict_fmerge, clean from pandas import DataFrame from numpy import array diff --git a/scdata/device/process/timeseries.py b/scdata/device/process/timeseries.py index 38a54ba3..1a8e0530 100644 --- a/scdata/device/process/timeseries.py +++ b/scdata/device/process/timeseries.py @@ -1,7 +1,7 @@ from numpy import nan, full, power, ones, diff, convolve, append from scipy import ndimage from scdata.device.process import is_within_circle -from scdata.utils import logger +from scdata.tools import logger def delta_index_ts(dataframe, **kwargs): result = dataframe.index.to_series().diff().astype('timedelta64[s]') diff --git a/scdata/io/device_api.py b/scdata/io/device_api.py index 94671ce9..827def8b 100644 --- a/scdata/io/device_api.py +++ b/scdata/io/device_api.py @@ -10,7 +10,7 @@ from geopy.distance import distance from scdata._config import config -from scdata.utils import logger, localise_date, clean, get_elevation, url_checker, process_headers +from scdata.tools import logger, localise_date, clean, get_elevation, url_checker, process_headers # from tzwhere import tzwhere from timezonefinder import TimezoneFinder from datetime import date, datetime diff --git a/scdata/io/device_file.py 
b/scdata/io/device_file.py
index db2b9180..e3d546ea 100644
--- a/scdata/io/device_file.py
+++ b/scdata/io/device_file.py
@@ -1,6 +1,6 @@
 from os import makedirs, listdir
 from os.path import exists, join, splitext
-from scdata.utils import logger, localise_date, clean
+from scdata.tools import logger, localise_date, clean
 from pandas import read_csv, to_datetime, DataFrame
 from scdata._config import config
 import csv
diff --git a/scdata/io/model.py b/scdata/io/model.py
index 923440f5..c0651bf6 100644
--- a/scdata/io/model.py
+++ b/scdata/io/model.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger
+from scdata.tools import logger
 from joblib import dump, load
 from scdata._config import config
 from os.path import join, exists
diff --git a/scdata/test/checks/checks.py b/scdata/test/checks/checks.py
index 4ec57556..787cf766 100644
--- a/scdata/test/checks/checks.py
+++ b/scdata/test/checks/checks.py
@@ -1,10 +1,10 @@
-from scdata.utils import logger
+from scdata.tools import logger
 import matplotlib.pyplot as plt
 import missingno as msno
 from pandas import to_datetime, DataFrame
 from scdata.test.plot.plot_tools import prepare_data
 from scdata._config import config
-from scdata.utils.dictmerge import dict_fmerge
+from scdata.tools.dictmerge import dict_fmerge

 def gaps_check(self, devices = None, channels = None, groupby = 'channel', **kwargs):
     if config.framework == 'jupyterlab': plt.ioff();
diff --git a/scdata/test/dispersion/dispersion.py b/scdata/test/dispersion/dispersion.py
index d2481692..b693e3f4 100644
--- a/scdata/test/dispersion/dispersion.py
+++ b/scdata/test/dispersion/dispersion.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, localise_date
+from scdata.tools import logger, localise_date
 from pandas import DataFrame
 from scdata._config import config

diff --git a/scdata/test/export/to_file.py b/scdata/test/export/to_file.py
index a21ef10b..8dde9a79 100755
--- a/scdata/test/export/to_file.py
+++ b/scdata/test/export/to_file.py
@@ -2,7 +2,7 @@
 from os.path import join, dirname, exists
 from os import makedirs

-from scdata.utils import logger
+from scdata.tools import logger
 import flask
 from re import sub

diff --git a/scdata/test/plot/box_plot.py b/scdata/test/plot/box_plot.py
index 656cd1d6..564c36cc 100644
--- a/scdata/test/plot/box_plot.py
+++ b/scdata/test/plot/box_plot.py
@@ -3,7 +3,7 @@
 from matplotlib import style
 from seaborn import set_palette, boxplot
 # import seaborn as sns
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data, groupby_session

diff --git a/scdata/test/plot/heatmap_iplot.py b/scdata/test/plot/heatmap_iplot.py
index 90ca6895..3cd94c4c 100644
--- a/scdata/test/plot/heatmap_iplot.py
+++ b/scdata/test/plot/heatmap_iplot.py
@@ -1,5 +1,5 @@
 from plotly.graph_objs import Heatmap, Layout, Figure
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data, groupby_session
 from plotly.offline import iplot
diff --git a/scdata/test/plot/heatmap_plot.py b/scdata/test/plot/heatmap_plot.py
index 58f3ad3c..9d8387e4 100644
--- a/scdata/test/plot/heatmap_plot.py
+++ b/scdata/test/plot/heatmap_plot.py
@@ -2,7 +2,7 @@
 from matplotlib import rcParams
 from matplotlib import style
 from seaborn import set_palette, heatmap
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data, groupby_session

diff --git a/scdata/test/plot/maps.py b/scdata/test/plot/maps.py
index f14010ca..ed4b159f 100644
--- a/scdata/test/plot/maps.py
+++ b/scdata/test/plot/maps.py
@@ -7,7 +7,7 @@
 from math import isnan, floor, ceil
 from traceback import print_exc
 from pandas import cut, date_range
-from scdata.utils import dict_fmerge, clean, logger
+from scdata.tools import dict_fmerge, clean, logger
 from scdata._config import config
 from numpy import linspace, nan
 from branca import element
diff --git a/scdata/test/plot/plot_tools.py b/scdata/test/plot/plot_tools.py
index a5bf33b5..3cacb793 100644
--- a/scdata/test/plot/plot_tools.py
+++ b/scdata/test/plot/plot_tools.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger
+from scdata.tools import logger
 from numpy import arange
 from pandas import cut, DataFrame, to_datetime, option_context, to_numeric
 import io
diff --git a/scdata/test/plot/scatter_dispersion_grid.py b/scdata/test/plot/scatter_dispersion_grid.py
index 3c7a6860..003a18ae 100644
--- a/scdata/test/plot/scatter_dispersion_grid.py
+++ b/scdata/test/plot/scatter_dispersion_grid.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger
+from scdata.tools import logger
 from scdata._config import config
 import matplotlib.pyplot as plt
 import matplotlib.cm as cm
diff --git a/scdata/test/plot/scatter_iplot.py b/scdata/test/plot/scatter_iplot.py
index bd19ab90..18c6ba08 100644
--- a/scdata/test/plot/scatter_iplot.py
+++ b/scdata/test/plot/scatter_iplot.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from .scatter_plot import scatter_plot
 from scdata._config import config
 from plotly.io import renderers
diff --git a/scdata/test/plot/scatter_plot.py b/scdata/test/plot/scatter_plot.py
index cd02f1e4..c76cada2 100644
--- a/scdata/test/plot/scatter_plot.py
+++ b/scdata/test/plot/scatter_plot.py
@@ -2,7 +2,7 @@
 from matplotlib import rcParams
 from matplotlib import style
 from seaborn import set_palette, regplot, scatterplot, relplot
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data, colors
 from numpy import array
diff --git a/scdata/test/plot/ts_dendrogram.py b/scdata/test/plot/ts_dendrogram.py
index a68bdb85..190e24e1 100644
--- a/scdata/test/plot/ts_dendrogram.py
+++ b/scdata/test/plot/ts_dendrogram.py
@@ -1,6 +1,6 @@
 from scipy.cluster import hierarchy as hc
 from pandas import DataFrame
-from scdata.utils import logger, dict_fmerge, clean
+from scdata.tools import logger, dict_fmerge, clean
 from scdata._config import config
 import matplotlib.pyplot as plt
 from matplotlib import rcParams
diff --git a/scdata/test/plot/ts_dispersion_grid.py b/scdata/test/plot/ts_dispersion_grid.py
index 18b418f1..308c258e 100644
--- a/scdata/test/plot/ts_dispersion_grid.py
+++ b/scdata/test/plot/ts_dispersion_grid.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger
+from scdata.tools import logger
 from scdata._config import config
 import matplotlib.pyplot as plt
 import matplotlib.cm as cm
diff --git a/scdata/test/plot/ts_dispersion_plot.py b/scdata/test/plot/ts_dispersion_plot.py
index 41d761c1..ea7c6edc 100644
--- a/scdata/test/plot/ts_dispersion_plot.py
+++ b/scdata/test/plot/ts_dispersion_plot.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger
+from scdata.tools import logger
 from scdata._config import config
 import matplotlib.pyplot as plt
 import matplotlib.colors
diff --git a/scdata/test/plot/ts_dispersion_uplot.py b/scdata/test/plot/ts_dispersion_uplot.py
index 5ed167e5..41dddcfc 100644
--- a/scdata/test/plot/ts_dispersion_uplot.py
+++ b/scdata/test/plot/ts_dispersion_uplot.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import colors
 from scipy.stats import t
diff --git a/scdata/test/plot/ts_iplot.py b/scdata/test/plot/ts_iplot.py
index 744d6356..fcc24670 100644
--- a/scdata/test/plot/ts_iplot.py
+++ b/scdata/test/plot/ts_iplot.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data

diff --git a/scdata/test/plot/ts_plot.py b/scdata/test/plot/ts_plot.py
index ab05ea2c..a0b040a7 100644
--- a/scdata/test/plot/ts_plot.py
+++ b/scdata/test/plot/ts_plot.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data
 from pandas import to_datetime
diff --git a/scdata/test/plot/ts_scatter.py b/scdata/test/plot/ts_scatter.py
index bd01a7a6..a8034662 100644
--- a/scdata/test/plot/ts_scatter.py
+++ b/scdata/test/plot/ts_scatter.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data

diff --git a/scdata/test/plot/ts_uplot.py b/scdata/test/plot/ts_uplot.py
index c923e060..3fe57a07 100644
--- a/scdata/test/plot/ts_uplot.py
+++ b/scdata/test/plot/ts_uplot.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge
+from scdata.tools import logger, dict_fmerge
 from scdata._config import config
 from .plot_tools import prepare_data, colors

diff --git a/scdata/test/test.py b/scdata/test/test.py
index ef2f8fb8..b91f9b73 100644
--- a/scdata/test/test.py
+++ b/scdata/test/test.py
@@ -13,7 +13,7 @@
 from pydantic import TypeAdapter, BaseModel, ConfigDict, model_serializer
 from typing import Optional, List, Dict, Any

-from scdata.utils import logger, localise_date, find_by_field
+from scdata.tools import logger, localise_date, find_by_field
 from scdata.io import read_csv_file, export_csv_file
 from scdata._config import config
 from scdata.device import Device
diff --git a/scdata/test/tools/combine.py b/scdata/test/tools/combine.py
index 481b4db2..c98a9a75 100644
--- a/scdata/test/tools/combine.py
+++ b/scdata/test/tools/combine.py
@@ -1,5 +1,5 @@
 from pandas import DataFrame
-from scdata.utils import logger
+from scdata.tools import logger
 from scdata.device import Device

 def combine(self, devices = None, readings = None):
diff --git a/scdata/test/tools/prepare.py b/scdata/test/tools/prepare.py
index 7b0282e1..99ea529f 100644
--- a/scdata/test/tools/prepare.py
+++ b/scdata/test/tools/prepare.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger, dict_fmerge, clean
+from scdata.tools import logger, dict_fmerge, clean
 from scdata._config import config
 from numpy import array
 from pandas import DataFrame
diff --git a/scdata/utils/__init__.py b/scdata/tools/__init__.py
similarity index 93%
rename from scdata/utils/__init__.py
rename to scdata/tools/__init__.py
index 9d027cd4..c02fd9db 100644
--- a/scdata/utils/__init__.py
+++ b/scdata/tools/__init__.py
@@ -1,4 +1,4 @@
-from .out import logger
+from .out import logger, set_logger_level
 from .date import localise_date, find_dates
 from .units import get_units_convf
 from .dictmerge import dict_fmerge
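The rename above moves the whole scdata.utils package to scdata.tools and, in the package __init__, additionally re-exports set_logger_level next to logger. For downstream scripts this is a one-line import change, sketched below under the assumption that no compatibility alias for the old scdata.utils path is kept (this series adds none); the 'DEBUG' argument to set_logger_level is also an assumption, since the patch only shows the re-export, not the signature:

    # Hypothetical downstream script, not part of this patch series
    # before this series: from scdata.utils import logger
    from scdata.tools import logger, set_logger_level

    set_logger_level('DEBUG')   # newly exported from scdata.tools.out
    logger.info('imports resolved via the renamed scdata.tools package')

Every in-tree caller is updated in the hunks around this note; external code importing scdata.utils needs the same change.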
diff --git a/scdata/utils/cleaning.py b/scdata/tools/cleaning.py
similarity index 100%
rename from scdata/utils/cleaning.py
rename to scdata/tools/cleaning.py
diff --git a/scdata/utils/date.py b/scdata/tools/date.py
similarity index 100%
rename from scdata/utils/date.py
rename to scdata/tools/date.py
diff --git a/scdata/utils/dictmerge.py b/scdata/tools/dictmerge.py
similarity index 100%
rename from scdata/utils/dictmerge.py
rename to scdata/tools/dictmerge.py
diff --git a/scdata/utils/find.py b/scdata/tools/find.py
similarity index 90%
rename from scdata/utils/find.py
rename to scdata/tools/find.py
index 73b3d10d..7a02fea8 100644
--- a/scdata/utils/find.py
+++ b/scdata/tools/find.py
@@ -1,4 +1,4 @@
-from scdata.utils import logger
+from scdata.tools import logger

 def find_by_field(models, value, field):
     try:
diff --git a/scdata/utils/headers.py b/scdata/tools/headers.py
similarity index 100%
rename from scdata/utils/headers.py
rename to scdata/tools/headers.py
diff --git a/scdata/utils/interim/example.csv b/scdata/tools/interim/example.csv
similarity index 100%
rename from scdata/utils/interim/example.csv
rename to scdata/tools/interim/example.csv
diff --git a/scdata/utils/interim/geodata.csv b/scdata/tools/interim/geodata.csv
similarity index 100%
rename from scdata/utils/interim/geodata.csv
rename to scdata/tools/interim/geodata.csv
diff --git a/scdata/utils/lazy.py b/scdata/tools/lazy.py
similarity index 100%
rename from scdata/utils/lazy.py
rename to scdata/tools/lazy.py
diff --git a/scdata/utils/location.py b/scdata/tools/location.py
similarity index 97%
rename from scdata/utils/location.py
rename to scdata/tools/location.py
index 1f07fbd0..2e76a1b1 100644
--- a/scdata/utils/location.py
+++ b/scdata/tools/location.py
@@ -1,6 +1,6 @@
 from requests import get
 from pandas import json_normalize
-from scdata.utils import logger
+from scdata.tools import logger
 from scdata._config import config

 def get_elevation(_lat = None, _long = None):
diff --git a/scdata/utils/meta.py b/scdata/tools/meta.py
similarity index 99%
rename from scdata/utils/meta.py
rename to scdata/tools/meta.py
index 4e497827..66aa940f 100644
--- a/scdata/utils/meta.py
+++ b/scdata/tools/meta.py
@@ -10,7 +10,7 @@
 from re import sub
 from pydantic import TypeAdapter
 from typing import List
-from scdata.utils.headers import process_headers
+from scdata.tools.headers import process_headers
 from scdata.models import Name, Blueprint, Metric

 def get_paths():
diff --git a/scdata/utils/other/clean_python.py b/scdata/tools/other/clean_python.py
similarity index 100%
rename from scdata/utils/other/clean_python.py
rename to scdata/tools/other/clean_python.py
diff --git a/scdata/utils/other/concat_script.py b/scdata/tools/other/concat_script.py
similarity index 100%
rename from scdata/utils/other/concat_script.py
rename to scdata/tools/other/concat_script.py
diff --git a/scdata/utils/other/csv_to_geojson.py b/scdata/tools/other/csv_to_geojson.py
similarity index 100%
rename from scdata/utils/other/csv_to_geojson.py
rename to scdata/tools/other/csv_to_geojson.py
diff --git a/scdata/utils/other/csv_to_gpx.py b/scdata/tools/other/csv_to_gpx.py
similarity index 100%
rename from scdata/utils/other/csv_to_gpx.py
rename to scdata/tools/other/csv_to_gpx.py
diff --git a/scdata/utils/other/data_xlsx.py b/scdata/tools/other/data_xlsx.py
similarity index 100%
rename from scdata/utils/other/data_xlsx.py
rename to scdata/tools/other/data_xlsx.py
diff --git a/scdata/utils/other/get_monitoring_stations_muv.py b/scdata/tools/other/get_monitoring_stations_muv.py
similarity index 100%
rename from scdata/utils/other/get_monitoring_stations_muv.py
rename to scdata/tools/other/get_monitoring_stations_muv.py
diff --git a/scdata/utils/other/get_reference_data_csic.py b/scdata/tools/other/get_reference_data_csic.py
similarity index 100%
rename from scdata/utils/other/get_reference_data_csic.py
rename to scdata/tools/other/get_reference_data_csic.py
diff --git a/scdata/utils/other/manage_post_info.py b/scdata/tools/other/manage_post_info.py
similarity index 100%
rename from scdata/utils/other/manage_post_info.py
rename to scdata/tools/other/manage_post_info.py
diff --git a/scdata/utils/other/rename_yaml.ipynb b/scdata/tools/other/rename_yaml.ipynb
similarity index 100%
rename from scdata/utils/other/rename_yaml.ipynb
rename to scdata/tools/other/rename_yaml.ipynb
diff --git a/scdata/utils/other/xls_dublin.py b/scdata/tools/other/xls_dublin.py
similarity index 100%
rename from scdata/utils/other/xls_dublin.py
rename to scdata/tools/other/xls_dublin.py
diff --git a/scdata/utils/out.py b/scdata/tools/out.py
similarity index 100%
rename from scdata/utils/out.py
rename to scdata/tools/out.py
diff --git a/scdata/utils/report.py b/scdata/tools/report.py
similarity index 100%
rename from scdata/utils/report.py
rename to scdata/tools/report.py
diff --git a/scdata/utils/stats.py b/scdata/tools/stats.py
similarity index 100%
rename from scdata/utils/stats.py
rename to scdata/tools/stats.py
diff --git a/scdata/utils/units.py b/scdata/tools/units.py
similarity index 98%
rename from scdata/utils/units.py
rename to scdata/tools/units.py
index 9fa4af14..f9d67aba 100644
--- a/scdata/utils/units.py
+++ b/scdata/tools/units.py
@@ -1,5 +1,5 @@
 from re import search
-from scdata.utils import logger
+from scdata.tools import logger
 from scdata._config import config

 def get_units_convf(sensor, from_units):
diff --git a/scdata/utils/uploads/example_upload_1.json b/scdata/tools/uploads/example_upload_1.json
similarity index 100%
rename from scdata/utils/uploads/example_upload_1.json
rename to scdata/tools/uploads/example_upload_1.json
diff --git a/scdata/utils/uploads/example_zenodo_upload.yaml b/scdata/tools/uploads/example_zenodo_upload.yaml
similarity index 100%
rename from scdata/utils/uploads/example_zenodo_upload.yaml
rename to scdata/tools/uploads/example_zenodo_upload.yaml
diff --git a/scdata/utils/uploads/report.pdf b/scdata/tools/uploads/report.pdf
similarity index 100%
rename from scdata/utils/uploads/report.pdf
rename to scdata/tools/uploads/report.pdf
diff --git a/scdata/utils/url_check.py b/scdata/tools/url_check.py
similarity index 100%
rename from scdata/utils/url_check.py
rename to scdata/tools/url_check.py
diff --git a/scdata/utils/zenodo.py b/scdata/tools/zenodo.py
similarity index 99%
rename from scdata/utils/zenodo.py
rename to scdata/tools/zenodo.py
index 475aab76..f37bf99c 100644
--- a/scdata/utils/zenodo.py
+++ b/scdata/tools/zenodo.py
@@ -1,8 +1,8 @@
 ''' Implementation of zenodo export '''

 from scdata._config import config
-from scdata.utils import logger, get_tests_log
-from scdata.utils.report import include_footer
+from scdata.tools import logger, get_tests_log
+from scdata.tools.report import include_footer
 from scdata import Test
 import json, yaml
 from os.path import join, dirname, getsize, exists
diff --git a/scdata/utils/zenodo_templates/README.md b/scdata/tools/zenodo_templates/README.md
similarity index 100%
rename from scdata/utils/zenodo_templates/README.md
rename to scdata/tools/zenodo_templates/README.md
diff --git a/scdata/utils/zenodo_templates/template_zenodo_dataset.json b/scdata/tools/zenodo_templates/template_zenodo_dataset.json
similarity index 100%
rename from scdata/utils/zenodo_templates/template_zenodo_dataset.json
rename to scdata/tools/zenodo_templates/template_zenodo_dataset.json
diff --git a/scdata/utils/zenodo_templates/template_zenodo_publication.json b/scdata/tools/zenodo_templates/template_zenodo_publication.json
similarity index 100%
rename from scdata/utils/zenodo_templates/template_zenodo_publication.json
rename to scdata/tools/zenodo_templates/template_zenodo_publication.json

From 95950bb3414d38f9216e91ae062fb17b16e62814 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 20:12:20 +0200
Subject: [PATCH 40/72] Add sc_air_file fixes

---
 blueprints/sc_air_file.json | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/blueprints/sc_air_file.json b/blueprints/sc_air_file.json
index b1831fb6..c1bb712b 100644
--- a/blueprints/sc_air_file.json
+++ b/blueprints/sc_air_file.json
@@ -257,9 +257,9 @@
         }
     ],
     "source": {
-        "type": "csv",
-        "module": "scdata.io.csv",
-        "handler": "csv_handler",
+        "type": "file",
+        "module": "scdata.io.device_file",
+        "handler": "CSVHandler",
         "params": {
             "header_skip": [
                 1,
@@ -267,12 +267,9 @@
                 3
             ],
             "index": "TIME",
-            "sep": ",",
-            "tz-aware": true
-        },
-        "files": {
-            "processed-data-file": null,
-            "raw-data-file": null
+            "separator": ",",
+            "tzaware": true,
+            "timezone": "UTC"
         }
     }
 }

From c5a3bcc4d1c39346d8be65d2ca663fca59083cbb Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Sun, 7 Apr 2024 20:54:47 +0200
Subject: [PATCH 41/72] Remove old blueprints

---
 blueprints/base.json                 |  18 -
 blueprints/csic_station.json         |  74 ----
 blueprints/muv_station.json          |  64 ---
 blueprints/nilu_lab.json             |  66 ---
 blueprints/parrot_soil.json          |  12 -
 blueprints/sc_20_station_iscape.json | 207 ----------
 blueprints/sc_21_station_iscape.json | 252 ------------
 blueprints/sc_21_station_module.json | 587 ---------
 blueprints/sc_21_water.json          | 124 ------
 blueprints/sck.json                  |  51 ---
 blueprints/sck_15.json               |  45 --
 blueprints/sck_20.json               | 128 ------
 blueprints/sck_21.json               | 206 ----------
 blueprints/sck_21_co2.json           | 185 ---------
 blueprints/sck_21_gps.json           | 205 ----------
 blueprints/sck_21_nilu.json          |  83 ----
 blueprints/sck_21_sen5x.json         | 251 ------------
 blueprints/sck_21_sps30.json         | 205 ----------
 18 files changed, 2763 deletions(-)
 delete mode 100644 blueprints/base.json
 delete mode 100644 blueprints/csic_station.json
 delete mode 100644 blueprints/muv_station.json
 delete mode 100644 blueprints/nilu_lab.json
 delete mode 100644 blueprints/parrot_soil.json
 delete mode 100644 blueprints/sc_20_station_iscape.json
 delete mode 100644 blueprints/sc_21_station_iscape.json
 delete mode 100644 blueprints/sc_21_station_module.json
 delete mode 100644 blueprints/sc_21_water.json
 delete mode 100644 blueprints/sck.json
 delete mode 100644 blueprints/sck_15.json
 delete mode 100644 blueprints/sck_20.json
 delete mode 100644 blueprints/sck_21.json
 delete mode 100644 blueprints/sck_21_co2.json
 delete mode 100644 blueprints/sck_21_gps.json
 delete mode 100644 blueprints/sck_21_nilu.json
 delete mode 100644 blueprints/sck_21_sen5x.json
 delete mode 100644 blueprints/sck_21_sps30.json

diff --git a/blueprints/base.json b/blueprints/base.json
deleted file mode 100644
index a990eff2..00000000
--- a/blueprints/base.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "clean_na": null,
-    "documentation": null,
-    "frequency":
null, - "id": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": null, - "min_date": null, - "processed_data_file": null, - "raw_data_file": null, - "sensors": null, - "source": null, - "sources": null, - "version": null, - "forwarding": null -} \ No newline at end of file diff --git a/blueprints/csic_station.json b/blueprints/csic_station.json deleted file mode 100644 index e85a3c30..00000000 --- a/blueprints/csic_station.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://analisi.transparenciacatalunya.cat/", - "frequency": null, - "id": null, - "timezone": null, - "resample": false, - "max_date": null, - "min_date": null, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "C6H6": { - "id": "C6H6", - "units": "ug/m3" - }, - "CO": { - "id": "CO", - "units": "mg/m3" - }, - "H2S": { - "id": "H2S", - "units": "ug/m3" - }, - "NO": { - "id": "NO", - "units": "ug/m3" - }, - "NO2": { - "id": "NO2", - "units": "ug/m3" - }, - "NOX": { - "id": "NOX", - "units": "ug/m3" - }, - "O3": { - "id": "O3", - "units": "ug/m3" - }, - "PM_10": { - "id": "PM10", - "units": "ug/m3" - }, - "PM_1": { - "id": "PM1", - "units": "ug/m3" - }, - "PM_25": { - "id": "PM2.5", - "units": "ug/m3" - }, - "SO2": { - "id": "SO2", - "units": "ug/m3" - } - }, - "source": null, - "sources": { - "api": { - "handler": "DadesObertesApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 4 - ], - "index": "date", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/muv_station.json b/blueprints/muv_station.json deleted file mode 100644 index fef3917a..00000000 --- a/blueprints/muv_station.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://github.com/waagsociety/air_quality_sensor_kit/tree/master/MUV%20Kit", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "max_date": null, - "min_date": null, - "resample": false, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "GB_2A": { - "id": "no2op2", - "units": "mV" - }, - "GB_2W": { - "id": "no2op1", - "units": "mV" - }, - "GB_3A": { - "id": "o3op2", - "units": "mV" - }, - "GB_3W": { - "id": "o3op1", - "units": "mV" - }, - "HUM": { - "id": "h", - "units": "%rh" - }, - "NOISE_A": { - "id": "dB", - "units": "dBA" - }, - "PM_10": { - "id": "p10", - "units": "ug/m3" - }, - "PM_25": { - "id": "p25", - "units": "ug/m3" - }, - "TEMP": { - "id": "t", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "MuvApiDevice" - }, - "csv": { - "header_skip": null, - "index": "Time" - }, - "sep": ",", - "tz-aware": true - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/nilu_lab.json b/blueprints/nilu_lab.json deleted file mode 100644 index 90ab943a..00000000 --- a/blueprints/nilu_lab.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "clean_na": null, - "documentation": null, - "frequency": null, - "id": null, - "timezone": null, - "max_date": null, - "min_date": null, - "resample": false, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "CO": { - "id": "CO", - "units": "ppm" - }, - "NO": { - "id": "NO", - "units": "ppb" - }, - "NOX": { - "id": "NOx", - "units": "ppb" - }, - "NO2": { - "id": "NO2", - "units": "ppb" - }, - "O3": { - "id": "O3", - "units": "ppb" - }, - "SO2": { - "id": "SO2", - "units": "ppb" - }, - "TEMP_A": { - "id": "Temp_Ch_A", - "units": "degC" - }, - "TEMP_B": { - "id": "Temp_Ch_B", - 
"units": "degC" - }, - "HUM_A": { - "id": "RH_Ch_A", - "units": "%rh" - }, - "HUM_B": { - "id": "RH_Ch_B", - "units": "%rh" - } - }, - "source": null, - "sources": { - "csv": { - "header_skip": [ - 1 - ], - "index": ["DATE", "TIME"], - "sep": " ", - "tz-aware": false - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/parrot_soil.json b/blueprints/parrot_soil.json deleted file mode 100644 index fa02cbc3..00000000 --- a/blueprints/parrot_soil.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "clean_na": null, - "documentation": null, - "frequency": null, - "id": null, - "timezone": null, - "resample": false, - "max_date": null, - "min_date": null, - "source": null, - "version": null -} \ No newline at end of file diff --git a/blueprints/sc_20_station_iscape.json b/blueprints/sc_20_station_iscape.json deleted file mode 100644 index 019abf5a..00000000 --- a/blueprints/sc_20_station_iscape.json +++ /dev/null @@ -1,207 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CO": { - "desc": "CO calculation based on simple electrode substraction", - "kwargs": { - "auxiliary": "GB_1A", - "hardware": "alphadelta", - "id": null, - "pollutant": "CO", - "working": "GB_1W" - }, - "process": "basic_4electrode_alg", - "units": "ppm", - "post": false, - "id": null - }, - "EXT_PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_10", - "EXT_PM_B_10" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "EXT_PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM1 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_1", - "EXT_PM_B_1" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "EXT_PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_25", - "EXT_PM_B_25" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "plaftorm_id": 19, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "EXT_PM_A_1": { - "desc": "PM1 measurement from PMS5003 A", - "id": "71", - "units": "ug/m3" - }, - "EXT_PM_A_10": { - "desc": "PM10 measurement from PMS5003 A", - "id": "73", - "units": "ug/m3" - }, - "EXT_PM_A_25": { - "desc": "PM2.5 measurement from PMS5003 A", - "id": "72", - "units": "ug/m3" - }, - "EXT_PM_B_1": { - "desc": "PM1 measurement from PMS5003 B", - "id": "75", - "units": "ug/m3" - }, - "EXT_PM_B_10": { - "desc": "PM10 measurement from PMS5003 B", - "id": "77", - "units": "ug/m3" - }, - "EXT_PM_B_25": { - "desc": "PM2.5 measurement from PMS5003 B", - "id": "76", - "units": "ug/m3" - }, - "EXT_TEMP": { - "desc": "Dallas External probe Temperature", - "id": "96", - "units": "degC" - }, - "GB_1A": { - "desc": "Auxiliary electrode 1", - "id": "65", - "units": "mV" - }, - "GB_1W": { - "desc": "Working electrode 1", - "id": "64", - "units": "mV" 
- }, - "GB_2A": { - "desc": "Auxiliary electrode 2", - "id": "62", - "units": "mV" - }, - "GB_2W": { - "desc": "Working electrode 2", - "id": "61", - "units": "mV" - }, - "GB_3A": { - "desc": "Auxiliary electrode 3", - "id": "68", - "units": "mV" - }, - "GB_3W": { - "desc": "Working electrode 3", - "id": "67", - "units": "mV" - }, - "GB_HUM": { - "desc": "Gases board humidity (SHT31)", - "id": "80", - "units": "%rh" - }, - "GB_TEMP": { - "desc": "Gases board temperature (SHT31)", - "id": "79", - "units": "degC" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sc_21_station_iscape.json b/blueprints/sc_21_station_iscape.json deleted file mode 100644 index 51fbf51a..00000000 --- a/blueprints/sc_21_station_iscape.json +++ /dev/null @@ -1,252 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "CO": { - "desc": "CO calculation based on simple electrode substraction", - "kwargs": { - "auxiliary": "GB_1A", - "hardware": "alphadelta", - "id": null, - "pollutant": "CO", - "working": "GB_1W" - }, - "process": "basic_4electrode_alg", - "units": "ppm", - "post": false, - "id": null - }, - "EXT_PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_10", - "EXT_PM_B_10" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "EXT_PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM1 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_1", - "EXT_PM_B_1" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "EXT_PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_25", - "EXT_PM_B_25" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "plaftorm_id": null, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "id": "112", - "units": 
"ppm" - }, - "CCS811_VOCS": { - "id": "113", - "units": "ppm" - }, - "EXT_PM_A_1": { - "desc": "PM1 measurement from PMS5003 A", - "id": "71", - "units": "ug/m3" - }, - "EXT_PM_A_10": { - "desc": "PM10 measurement from PMS5003 A", - "id": "73", - "units": "ug/m3" - }, - "EXT_PM_A_25": { - "desc": "PM2.5 measurement from PMS5003 A", - "id": "72", - "units": "ug/m3" - }, - "EXT_PM_B_1": { - "desc": "PM1 measurement from PMS5003 B", - "id": "75", - "units": "ug/m3" - }, - "EXT_PM_B_10": { - "desc": "PM10 measurement from PMS5003 B", - "id": "77", - "units": "ug/m3" - }, - "EXT_PM_B_25": { - "desc": "PM2.5 measurement from PMS5003 B", - "id": "76", - "units": "ug/m3" - }, - "EXT_TEMP": { - "desc": "Dallas External probe Temperature", - "id": "96", - "units": "degC" - }, - "GB_1A": { - "desc": "Auxiliary electrode 1", - "id": "65", - "units": "mV" - }, - "GB_1W": { - "desc": "Working electrode 1", - "id": "64", - "units": "mV" - }, - "GB_2A": { - "desc": "Auxiliary electrode 2", - "id": "62", - "units": "mV" - }, - "GB_2W": { - "desc": "Working electrode 2", - "id": "61", - "units": "mV" - }, - "GB_3A": { - "desc": "Auxiliary electrode 3", - "id": "68", - "units": "mV" - }, - "GB_3W": { - "desc": "Working electrode 3", - "id": "67", - "units": "mV" - }, - "GB_HUM": { - "desc": "Gases board humidity (SHT31)", - "id": "80", - "units": "%rh" - }, - "GB_TEMP": { - "desc": "Gases board temperature (SHT31)", - "id": "79", - "units": "degC" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sc_21_station_module.json b/blueprints/sc_21_station_module.json deleted file mode 100644 index b47333c4..00000000 --- a/blueprints/sc_21_station_module.json +++ /dev/null @@ -1,587 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "EXT_PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_10", - "EXT_PM_B_10" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": true, - "id": 88 - }, - "EXT_PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM1 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_1", - "EXT_PM_B_1" - ], - "pick": "min", 
- "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": true, - "id": 89 - }, - "EXT_PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "factor": 0.3, - "limits": [ - 0, - 1000 - ], - "names": [ - "EXT_PM_A_25", - "EXT_PM_B_25" - ], - "pick": "min", - "window_size": 5, - "window_type": null - }, - "process": "merge_ts", - "units": "ug/m3", - "post": true, - "id": 87 - }, - "PT1000_POS": { - "desc": "PT1000 raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "ASPT1000": { - "desc": "PT1000 temperature calculation in AFE", - "id": null, - "kwargs": { - "pt1000minus": null, - "from_date": null, - "timezone": null, - "to_date": null, - "pt1000plus": null, - "afe_id": null - }, - "post": false, - "process": "alphasense_pt1000", - "units": "degC" - }, - "EC_SENSOR_TEMP": { - "desc": "Electrochemical sensor temperature", - "id": "X013", - "kwargs": { - "priority": "ASPT1000" - }, - "post": false, - "process": "ec_sensor_temp", - "units": "degC" - }, - "CO_WE": { - "desc": "CO working electrode raw value", - "id": "X001", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "CO_AE": { - "desc": "CO auxiliary electrode raw value", - "id": "X002", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "NO2_WE": { - "desc": "NO2 working electrode raw value", - "id": "X003", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "NO2_AE": { - "desc": "NO2 auxiliary electrode raw value", - "id": "X004", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "NO_WE": { - "desc": "NO working electrode raw value", - "id": "X005", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "NO_AE": { - "desc": "NO auxiliary electrode raw value", - "id": "X006", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "SO2_WE": { - "desc": "SO2 working electrode raw value", - "id": "X007", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "SO2_AE": { - "desc": "SO2 auxiliary electrode raw value", - "id": "X008", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "H2S_WE": { - "desc": "H2S working electrode raw value", - "id": "X009", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "H2S_AE": { - "desc": "H2S auxiliary electrode raw value", - "id": "X010", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "OX_WE": { - "desc": "OX working electrode raw value", - "id": "X011", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "OX_AE": { - "desc": "OX auxiliary electrode raw value", - "id": "X012", - "kwargs": { - "channel": null - }, - "post": false, - "process": "channel_names", - "units": "V" - }, - "CO": { - "desc": "Calculation of CO based on AAN 803-04", - "id": 152, - "kwargs": { - "ae": null, - "alphasense_id": null, - "from_date": null, - "timezone": null, - "t": "EC_SENSOR_TEMP", - "to_date": null, - "we": null - }, - "post": true, - "process": 
"alphasense_803_04", - "units": "ppb" - }, - "NO2": { - "desc": "Calculation of NO2 based on AAN 803-04", - "id": 153, - "kwargs": { - "ae": null, - "alphasense_id": null, - "from_date": null, - "timezone": null, - "t": "EC_SENSOR_TEMP", - "to_date": null, - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" - }, - "O3": { - "desc": "Calculation of O3 based on AAN 803-04", - "id": 157, - "kwargs": { - "ae": null, - "alphasense_id": null, - "from_date": null, - "timezone": null, - "t": "EC_SENSOR_TEMP", - "to_date": null, - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" - }, - "SO2": { - "desc": "Calculation of SO2 based on AAN 803-04", - "id": 155, - "kwargs": { - "ae": null, - "alphasense_id": null, - "from_date": null, - "timezone": null, - "t": "EC_SENSOR_TEMP", - "to_date": null, - "we": null, - "use_alternative": true - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" - }, - "NO": { - "desc": "Calculation of NO based on AAN 803-04", - "id": 154, - "kwargs": { - "ae": null, - "alphasense_id": null, - "from_date": null, - "timezone": null, - "t": "EC_SENSOR_TEMP", - "to_date": null, - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" - }, - "H2S": { - "desc": "Calculation of H2S based on AAN 803-04", - "id": 156, - "kwargs": { - "ae": null, - "alphasense_id": null, - "from_date": null, - "timezone": null, - "t": "EC_SENSOR_TEMP", - "to_date": null, - "we": null - }, - "post": true, - "process": "alphasense_803_04", - "units": "ppb" - } - }, - "min_date": null, - "platform_id": 33, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "ADC_48_0": { - "desc": "Channel 0 of ADC 0x48", - "id": 133, - "units": "V" - }, - "ADC_48_1": { - "desc": "Channel 1 of ADC 0x48", - "id": 134, - "units": "V" - }, - "ADC_48_2": { - "desc": "Channel 2 of ADC 0x48", - "id": 135, - "units": "V" - }, - "ADC_48_3": { - "desc": "Channel 3 of ADC 0x48", - "id": 136, - "units": "V" - }, - "ADC_49_0": { - "desc": "Channel 0 of ADC 0x49", - "id": 138, - "units": "V" - }, - "ADC_49_1": { - "desc": "Channel 1 of ADC 0x49", - "id": 139, - "units": "V" - }, - "ADC_49_2": { - "desc": "Channel 2 of ADC 0x49", - "id": 140, - "units": "V" - }, - "ADC_49_3": { - "desc": "Channel 3 of ADC 0x49", - "id": 141, - "units": "V" - }, - "ADC_4A_0": { - "desc": "Channel 0 of ADC 0x4A", - "id": 143, - "units": "V" - }, - "ADC_4A_1": { - "desc": "Channel 1 of ADC 0x4A", - "id": 144, - "units": "V" - }, - "ADC_4A_2": { - "desc": "Channel 2 of ADC 0x4A", - "id": 145, - "units": "V" - }, - "ADC_4A_3": { - "desc": "Channel 3 of ADC 0x4A", - "id": 146, - "units": "V" - }, - "ADC_4B_0": { - "desc": "Channel 0 of ADC 0x4B", - "id": 148, - "units": "V" - }, - "ADC_4B_1": { - "desc": "Channel 1 of ADC 0x4B", - "id": 149, - "units": "V" - }, - "ADC_4B_2": { - "desc": "Channel 2 of ADC 0x4B", - "id": 150, - "units": "V" - }, - "ADC_4B_3": { - "desc": "Channel 3 of ADC 0x4B", - "id": 151, - "units": "V" - }, - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": "112", - "units": "ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "113", - "units": "ppm" - }, - "PMS5003_EXT_PM_A_1": { - "desc": "PM1 measurement from PMS5003 A", - "id": "71", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_A_10": { - "desc": "PM10 measurement from PMS5003 A", - "id": "73", - "units": "ug/m3" - }, - 
"PMS5003_EXT_PM_A_25": { - "desc": "PM2.5 measurement from PMS5003 A", - "id": "72", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_B_1": { - "desc": "PM1 measurement from PMS5003 B", - "id": "75", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_B_10": { - "desc": "PM10 measurement from PMS5003 B", - "id": "77", - "units": "ug/m3" - }, - "PMS5003_EXT_PM_B_25": { - "desc": "PM2.5 measurement from PMS5003 B", - "id": "76", - "units": "ug/m3" - }, - "PMS5003_PM_1": { - "desc": "PM1 measurement from PMS5003", - "id": "89", - "units": "ug/m3" - }, - "PMS5003_PM_10": { - "desc": "PM10 measurement from PMS5003", - "id": "88", - "units": "ug/m3" - }, - "PMS5003_PM_25": { - "desc": "PM2.5 measurement from PMS5003", - "id": "87", - "units": "ug/m3" - }, - "SHT31_EXT_HUM": { - "desc": "External SHT31 humidity", - "id": "80", - "units": "%rh" - }, - "SHT31_EXT_TEMP": { - "desc": "External SHT31 temperature", - "id": "79", - "units": "degC" - }, - "PM_DALLAS_TEMP": { - "desc": "Dallas External probe Temperature", - "id": "96", - "units": "degC" - }, - "GPS_LAT": { - "desc": "GPS lat", - "id": null, - "units": "degC" - }, - "GPS_LONG": { - "desc": "GPS long", - "id": null, - "units": "degC" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - }, - "SCD30_TEMP": { - "desc": "External temperature (SCD30)", - "id": "160", - "units": "degC" - }, - "SCD30_HUM": { - "desc": "External humidity (SCD30)", - "id": "161", - "units": "%rh" - }, - "SCD30_CO2": { - "desc": "CO2 (SCD30)", - "id": "158", - "units": "ppm" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} diff --git a/blueprints/sc_21_water.json b/blueprints/sc_21_water.json deleted file mode 100644 index 1652fcec..00000000 --- a/blueprints/sc_21_water.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "max_date": null, - "resample": false, - "metrics": {}, - "min_date": null, - "platform_id": 31, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "AS_COND": { - "desc": "Atlas Conductivity", - "id": "45", - "units": "uS/cm" - }, - "AS_ORP": { - "desc": "Atlas Redox Potential", - "id": "164", - "units": "mV" - }, - "AS_PH": { - "desc": "Atlas pH", - "id": "43", - "units": "-" - }, - "AS_DO": { - "desc": "Atlas Dissolved Oxygen", - "id": "48", - "units": "mg/L" - }, - "AS_DO_SAT": { - "desc": "Atlas Oxygen Saturation", - "id": "49", - "units": "%" - }, - "AS_SG": { - "desc": "Atlas Specific Gravity", - "id": "46", - "units": "sg" - }, - "AS_TDS": { - "desc": "Atlas Total Dissolved Solids", - "id": "122", - "units": "ppm" - }, - "AS_SAL": { - "desc": "Atlas Salinity", - "id": "51", - "units": "PSU" - }, - "AS_TEMP": { - "desc": "Atlas Temperature", - "id": "44", - "units": "degC" - }, - "PM_DALLAS_TEMP": { - "desc": "Dallas External probe Temperature", - "id": "96", - "units": "degC" - 
}, - "GPS_ALT": { - "desc": "GPS Altitude", - "id": "127", - "units": "m" - }, - "GPS_DIL": { - "desc": "GPS Horizontal Dilution of Position", - "id": "131", - "units": "#" - }, - "GPS_FIX_QUALITY": { - "desc": "GPS Fix Quality", - "id": "128", - "units": "#" - }, - "GPS_LAT": { - "desc": "GPS Latitude", - "id": "125", - "units": "deg" - }, - "GPS_LONG": { - "desc": "GPS Longitude", - "id": "126", - "units": "deg" - }, - "GPS_SAT_N": { - "desc": "GPS Traked Satellites", - "id": "130", - "units": "#" - }, - "GPS_SPEED": { - "desc": "GPS Speed", - "id": "129", - "units": "m/s" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} diff --git a/blueprints/sck.json b/blueprints/sck.json deleted file mode 100644 index 5d747ac5..00000000 --- a/blueprints/sck.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "clean_na": null, - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "min_date": null, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": false - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_15.json b/blueprints/sck_15.json deleted file mode 100644 index 30f0c859..00000000 --- a/blueprints/sck_15.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "timezone": null, - "resample": false, - "max_date": null, - "min_date": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": null, - "index": "Time", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_20.json b/blueprints/sck_20.json deleted file mode 100644 index 486c666e..00000000 --- a/blueprints/sck_20.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_10", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_1", - 
"window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_25", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "platform_id": 11, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "PM_1": { - "desc": "PM1 measurement from PMS5003", - "id": "89", - "units": "ug/m3" - }, - "PM_10": { - "desc": "PM10 measurement from PMS5003", - "id": "88", - "units": "ug/m3" - }, - "PM_25": { - "desc": "PM2.5 measurement from PMS5003", - "id": "87", - "units": "ug/m3" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_21.json b/blueprints/sck_21.json deleted file mode 100644 index 2116a1ad..00000000 --- a/blueprints/sck_21.json +++ /dev/null @@ -1,206 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_10", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_1", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_25", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "platform_id": 28, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": "112", - "units": 
"ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "113", - "units": "ppb" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "PMS5003_PM_1": { - "desc": "PM1 measurement from PMS5003", - "id": "89", - "units": "ug/m3" - }, - "PMS5003_PM_10": { - "desc": "PM10 measurement from PMS5003", - "id": "88", - "units": "ug/m3" - }, - "PMS5003_PM_25": { - "desc": "PM2.5 measurement from PMS5003", - "id": "87", - "units": "ug/m3" - }, - "PMS5003_PN_03": - { - "id": "165", - "desc": "PN0.5 measurement from PMS5003", - "units": "#/0.1l" - }, - "PMS5003_PN_05": - { - "id": "166", - "desc": "PN0.5 measurement from PMS5003", - "units": "#/0.1l" - }, - "PMS5003_PN_1": - { - "id": "167", - "desc": "PN1.0 measurement from PMS5003", - "units": "#/0.1l" - }, - "PMS5003_PN_25": - { - "id": "168", - "desc": "PN2.5 measurement from PMS5003", - "units": "#/0.1l" - }, - "PMS5003_PN_5": - { - "id": "169", - "desc": "PN5.0 measurement from PMS5003", - "units": "#/0.1l" - }, - "PMS5003_PN_10": - { - "id": "170", - "desc": "PN10.0 measurement from PMS5003", - "units": "#/0.1l" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_21_co2.json b/blueprints/sck_21_co2.json deleted file mode 100644 index fdf8b36b..00000000 --- a/blueprints/sck_21_co2.json +++ /dev/null @@ -1,185 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_10", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_1", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_25", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "platform_id": 35, - "processed_data_file": null, - 
"raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": "112", - "units": "ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "113", - "units": "ppb" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "PMS5003_PM_1": { - "desc": "PM1 measurement from PMS5003", - "id": "89", - "units": "ug/m3" - }, - "PMS5003_PM_10": { - "desc": "PM10 measurement from PMS5003", - "id": "88", - "units": "ug/m3" - }, - "PMS5003_PM_25": { - "desc": "PM2.5 measurement from PMS5003", - "id": "87", - "units": "ug/m3" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "SCD30_TEMP": { - "desc": "External temperature (SCD30)", - "id": "160", - "units": "degC" - }, - "SCD30_HUM": { - "desc": "External humidity (SCD30)", - "id": "161", - "units": "%rh" - }, - "SCD30_CO2": { - "desc": "CO2 (SCD30)", - "id": "158", - "units": "ppm" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_21_gps.json b/blueprints/sck_21_gps.json deleted file mode 100644 index b7cd5794..00000000 --- a/blueprints/sck_21_gps.json +++ /dev/null @@ -1,205 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "PM_10_CLEAN": { - "desc": "PM10 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_10", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_1_CLEAN": { - "desc": "PM1 calculated based on both PMS5003 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_1", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both PMS5003 PM2.5 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_25", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "platform_id": 32, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": 
"112", - "units": "ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "113", - "units": "ppm" - }, - "GPS_ALT": { - "desc": "GPS Altitude", - "id": "127", - "units": "m" - }, - "GPS_DIL": { - "desc": "GPS Horizontal Dilution of Position", - "id": "131", - "units": "#" - }, - "GPS_FIX_QUALITY": { - "desc": "GPS Fix Quality", - "id": "128", - "units": "#" - }, - "GPS_LAT": { - "desc": "GPS Latitude", - "id": "125", - "units": "deg" - }, - "GPS_LONG": { - "desc": "GPS Longitude", - "id": "126", - "units": "deg" - }, - "GPS_SAT_N": { - "desc": "GPS Traked Satellites", - "id": "130", - "units": "#" - }, - "GPS_SPEED": { - "desc": "GPS Speed", - "id": "129", - "units": "m/s" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "PMS5003_PM_1": { - "desc": "PM1 measurement from PMS5003", - "id": "89", - "units": "ug/m3" - }, - "PMS5003_PM_10": { - "desc": "PM10 measurement from PMS5003", - "id": "88", - "units": "ug/m3" - }, - "PMS5003_PM_25": { - "desc": "PM2.5 measurement from PMS5003", - "id": "87", - "units": "ug/m3" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_21_nilu.json b/blueprints/sck_21_nilu.json deleted file mode 100644 index c8579b09..00000000 --- a/blueprints/sck_21_nilu.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": null, - "min_date": null, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "60", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": "55", - "units": "ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "54", - "units": "ppb" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "5", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "56", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "57", - "units": "dBA" - }, - "PMS5003_PM_1": { - "desc": "PM1 measurement from PMS5003", - "id": "11", - "units": "ug/m3" - }, - "PMS5003_PM_10": { - "desc": "PM10 measurement from PMS5003", - "id": "12", - "units": "ug/m3" - }, - "PMS5003_PM_25": { - "desc": "PM2.5 measurement from PMS5003", - "id": "13", - "units": "ug/m3" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "1", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "15", - "units": "degC" - } - }, - "source": "api", - "sources": { - "api": { - "handler": "NiluApiDevice" - }, - "csv": { - "header_skip": null, - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_21_sen5x.json b/blueprints/sck_21_sen5x.json deleted file mode 100644 index 05529f34..00000000 --- a/blueprints/sck_21_sen5x.json 
+++ /dev/null @@ -1,251 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "PM_10_CLEAN": { - "desc": "PM10 calculated based on both SPS30 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_10", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_1_CLEAN": { - "desc": "PM1 calculated based on both SPS30 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_1", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both SPS30 PM2.5 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_25", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "platform_id": 42, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": "112", - "units": "ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "113", - "units": "ppb" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "SEN5X_PM_1": - { - "id": "193", - "desc": "PM1 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PM_10": - { - "id": "196", - "desc": "PM10 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PM_25": - { - "id": "194", - "desc": "PM2.5 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PM_4": - { - "id": "195", - "desc": "PM4.0 measurement from SEN5X", - "units": "ug/m3" - }, - "SEN5X_PN_05": - { - "id": "197", - "desc": "PN0.5 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_1": - { - "id": "198", - "desc": "PN1.0 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_25": - { - "id": "199", - "desc": "PN2.5 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_4": - { - "id": "200", - "desc": "PN4.0 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_PN_10": - { - "id": "201", - "desc": "PN10.0 measurement from SEN5X", - "units": "#/0.1l" - }, - "SEN5X_TPSIZE": - { - "id": "202", - "desc": "Typical Particle Size from SEN5X", - "units": "um" - }, - "SEN5X_TEMP": - { - "id": "204", - "desc": "SEN5X Temperature", - "units": "degC" - }, - "SEN5X_HUM": - { - "id": "203", - "desc": "SEN5X Humidity", - "units": "%rh" - }, - "SEN5X_VOCS_IDX": - { - "id": "205", - "desc": "SEN5X VOCs Index", - "units": "" - }, - "SEN5X_NOX_IDX": - { - "id": "206", - 
"desc": "SEN5X NOX Index", - "units": "" - }, - "SEN5X_VOCS_RAW": - { - "id": "207", - "desc": "SEN5X VOCs raw", - "units": "" - }, - "SEN5X_NOX_RAW": - { - "id": "208", - "desc": "SEN5X NOX raw", - "units": "" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file diff --git a/blueprints/sck_21_sps30.json b/blueprints/sck_21_sps30.json deleted file mode 100644 index f8062537..00000000 --- a/blueprints/sck_21_sps30.json +++ /dev/null @@ -1,205 +0,0 @@ -{ - "clean_na": null, - "documentation": "https://docs.smartcitizen.me/", - "frequency": null, - "id": null, - "info_data_file": null, - "timezone": null, - "resample": false, - "max_date": null, - "metrics": { - "CCS811_ECO2_CLEAN": { - "desc": "eCO2 cleaned data", - "kwargs": { - "limits": [ - 400, - 65000 - ], - "name": "CCS811_ECO2", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppm", - "post": false, - "id": null - }, - "CCS811_VOCS_CLEAN": { - "desc": "Volatile Organic Compounds cleaned data", - "kwargs": { - "limits": [ - 0, - 65000 - ], - "name": "CCS811_VOCS", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ppb", - "post": false, - "id": null - }, - "PM_10_CLEAN": { - "desc": "PM10 calculated based on both SPS30 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_10", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_1_CLEAN": { - "desc": "PM1 calculated based on both SPS30 PM10 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_1", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - }, - "PM_25_CLEAN": { - "desc": "PM2.5 calculated based on both SPS30 PM2.5 inputs", - "kwargs": { - "limits": [ - 0, - 1000 - ], - "name": "PM_25", - "window_size": 5, - "window_type": null - }, - "process": "clean_ts", - "units": "ug/m3", - "post": false, - "id": null - } - }, - "min_date": null, - "platform_id": 41, - "processed_data_file": null, - "raw_data_file": null, - "sensors": { - "BATT": { - "desc": "Device battery status", - "id": "10", - "units": "%" - }, - "CCS811_ECO2": { - "desc": "Equivalent CO2", - "id": "112", - "units": "ppm" - }, - "CCS811_VOCS": { - "desc": "total Volatile Organics Compounds", - "id": "113", - "units": "ppb" - }, - "HUM": { - "desc": "Urban board humidity (SHT31)", - "id": "56", - "units": "%rh" - }, - "LIGHT": { - "desc": "Urban board ambient light", - "id": "14", - "units": "lux" - }, - "NOISE_A": { - "desc": "A-scale noise SPL", - "id": "53", - "units": "dBA" - }, - "SPS30_PM_1": { - "desc": "PM1 measurement from SPS30", - "id": "182", - "units": "ug/m3" - }, - "SPS30_PM_10": { - "desc": "PM10 measurement from SPS30", - "id": "185", - "units": "ug/m3" - }, - "SPS30_PM_25": { - "desc": "PM2.5 measurement from SPS30", - "id": "183", - "units": "ug/m3" - }, - "SPS30_PM_4": { - "desc": "PM4.0 measurement from SPS30", - "id": "184", - "units": "ug/m3" - }, - "SPS30_PN_05": { - "desc": "PN0.5 measurement from SPS30", - "id": "186", - "units": "#/0.1l" - }, - "SPS30_PN_1": { - "desc": "PN1.0 
measurement from SPS30", - "id": "187", - "units": "#/0.1l" - }, - "SPS30_PN_25": { - "desc": "PN2.5 measurement from SPS30", - "id": "188", - "units": "#/0.1l" - }, - "SPS30_PN_4": { - "desc": "PN4.0 measurement from SPS30", - "id": "189", - "units": "#/0.1l" - }, - "SPS30_PN_10": { - "desc": "PN10.0 measurement from SPS30", - "id": "190", - "units": "#/0.1l" - }, - "SPS30_TPSIZE": { - "desc": "Typical Particle Size from SPS30", - "id": "191", - "units": "um" - }, - "PRESS": { - "desc": "Atmospheric pressure", - "id": "58", - "units": "kPa" - }, - "TEMP": { - "desc": "Urban board temperature (SHT31)", - "id": "55", - "units": "degC" - } - }, - "source": null, - "sources": { - "api": { - "handler": "ScApiDevice" - }, - "csv": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "sep": ",", - "tz-aware": true - } - }, - "version": null -} \ No newline at end of file From 31b5c4a4d17d9f7223f81348a346997a8f8f2d77 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Sun, 7 Apr 2024 20:55:01 +0200 Subject: [PATCH 42/72] Remove source from blueprints --- blueprints/{sc_air_api.json => sc_air.json} | 7 +- blueprints/sc_air_file.json | 275 ------------------ .../{sc_water_api.json => sc_water.json} | 0 3 files changed, 1 insertion(+), 281 deletions(-) rename blueprints/{sc_air_api.json => sc_air.json} (98%) delete mode 100644 blueprints/sc_air_file.json rename blueprints/{sc_water_api.json => sc_water.json} (100%) diff --git a/blueprints/sc_air_api.json b/blueprints/sc_air.json similarity index 98% rename from blueprints/sc_air_api.json rename to blueprints/sc_air.json index 5a0c6d58..d3a81862 100644 --- a/blueprints/sc_air_api.json +++ b/blueprints/sc_air.json @@ -255,10 +255,5 @@ "function": "alphasense_803_04", "unit": "ppb" } - ], - "source":{ - "type": "api", - "module": "smartcitizen_connector", - "handler": "SCDevice" - } + ] } diff --git a/blueprints/sc_air_file.json b/blueprints/sc_air_file.json deleted file mode 100644 index c1bb712b..00000000 --- a/blueprints/sc_air_file.json +++ /dev/null @@ -1,275 +0,0 @@ -{ - "meta": { - "documentation": "https://docs.smartcitizen.me/" - }, - "metrics": [ - { - "name": "PT1000_POS", - "description": "PT1000 raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "ASPT1000", - "description": "PT1000 temperature calculation in AFE", - "id": null, - "kwargs": { - "pt1000minus": null, - "pt1000plus": null, - "afe_id": null - }, - "post": false, - "function": "alphasense_pt1000", - "unit": "degC" - }, - { - "name": "EC_SENSOR_TEMP", - "description": "Electrochemical sensor temperature", - "id": null, - "kwargs": { - "priority": "ASPT1000" - }, - "post": false, - "function": "ec_sensor_temp", - "unit": "degC" - }, - { - "name": "CO_WE", - "description": "CO working electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "CO_AE", - "description": "CO auxiliary electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "NO2_WE", - "description": "NO2 working electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "NO2_AE", - "description": "NO2 auxiliary electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": 
"V" - }, - { - "name": "NO_WE", - "description": "NO working electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "NO_AE", - "description": "NO auxiliary electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "SO2_WE", - "description": "SO2 working electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "SO2_AE", - "description": "SO2 auxiliary electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "H2S_WE", - "description": "H2S working electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "H2S_AE", - "description": "H2S auxiliary electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "OX_WE", - "description": "OX working electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name": "OX_AE", - "description": "OX auxiliary electrode raw value", - "id": null, - "kwargs": { - "channel": null - }, - "post": false, - "function": "channel_names", - "unit": "V" - }, - { - "name":"CO", - "description": "Calculation of CO based on AAN 803-04", - "id": 152, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "function": "alphasense_803_04", - "unit": "ppb" - }, - { - "name":"NO2", - "description": "Calculation of NO2 based on AAN 803-04", - "id": 153, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "function": "alphasense_803_04", - "unit": "ppb" - }, - { - "name":"O3", - "description": "Calculation of O3 based on AAN 803-04", - "id": 157, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "function": "alphasense_803_04", - "unit": "ppb" - }, - { - "name":"SO2", - "description": "Calculation of SO2 based on AAN 803-04", - "id": 155, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null, - "use_alternative": true - }, - "post": true, - "function": "alphasense_803_04", - "unit": "ppb" - }, -{ - "name": "NO", - "description": "Calculation of NO based on AAN 803-04", - "id": 154, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "function": "alphasense_803_04", - "unit": "ppb" - }, - { - "name": "H2S", - "description": "Calculation of H2S based on AAN 803-04", - "id": 156, - "kwargs": { - "ae": null, - "alphasense_id": null, - "t": "EC_SENSOR_TEMP", - "we": null - }, - "post": true, - "function": "alphasense_803_04", - "unit": "ppb" - } - ], - "source": { - "type": "file", - "module": "scdata.io.device_file", - "handler": "CSVHandler", - "params": { - "header_skip": [ - 1, - 2, - 3 - ], - "index": "TIME", - "separator": ",", - "tzaware": true, - "timezone": "UTC" - } - } -} diff --git a/blueprints/sc_water_api.json b/blueprints/sc_water.json similarity index 100% rename from blueprints/sc_water_api.json rename to blueprints/sc_water.json 
From a5516d4dd40778460d017b61ec8bd4315d42c1c7 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 9 Apr 2024 14:09:03 +0200 Subject: [PATCH 43/72] Update SCAS210099.json --- hardware/SCAS210099.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hardware/SCAS210099.json b/hardware/SCAS210099.json index 095fcea3..86c5ed1d 100644 --- a/hardware/SCAS210099.json +++ b/hardware/SCAS210099.json @@ -1,5 +1,5 @@ { - "blueprint_url": "https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/blueprints/sc_air_api.json", + "blueprint_url": "https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/blueprints/sc_air.json", "description": "2PMS5003-2ELEC-AFE", "versions": [ { @@ -12,4 +12,4 @@ "to": null } ] -} \ No newline at end of file +} From 3cf471bbfe3c4890da74260b170f4b6dd3622481 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Mon, 29 Apr 2024 23:01:30 +0200 Subject: [PATCH 44/72] Cleanup config and move dedicated params to specific files --- scdata/_config/__init__.py | 1 - scdata/_config/config.py | 444 ++++++++++++-------- scdata/_config/custom_logger.py | 40 ++ scdata/device/device.py | 11 +- scdata/device/process/__init__.py | 2 +- scdata/device/process/alphasense.py | 32 +- scdata/device/process/baseline.py | 2 +- scdata/device/process/regression.py | 4 +- scdata/device/process/timeseries.py | 2 +- scdata/io/device_api.py | 7 +- scdata/io/device_file.py | 4 +- scdata/io/model.py | 2 +- scdata/test/checks/checks.py | 2 +- scdata/test/dispersion/dispersion.py | 3 +- scdata/test/export/to_file.py | 2 +- scdata/test/plot/box_plot.py | 3 +- scdata/test/plot/heatmap_iplot.py | 3 +- scdata/test/plot/heatmap_plot.py | 3 +- scdata/test/plot/maps.py | 4 +- scdata/test/plot/plot_tools.py | 2 +- scdata/test/plot/scatter_dispersion_grid.py | 2 +- scdata/test/plot/scatter_iplot.py | 3 +- scdata/test/plot/scatter_plot.py | 3 +- scdata/test/plot/ts_dendrogram.py | 4 +- scdata/test/plot/ts_dispersion_grid.py | 2 +- scdata/test/plot/ts_dispersion_plot.py | 2 +- scdata/test/plot/ts_dispersion_uplot.py | 4 +- scdata/test/plot/ts_iplot.py | 3 +- scdata/test/plot/ts_plot.py | 4 +- scdata/test/plot/ts_scatter.py | 3 +- scdata/test/plot/ts_uplot.py | 4 +- scdata/test/test.py | 6 +- scdata/test/tools/combine.py | 2 +- scdata/test/tools/prepare.py | 4 +- scdata/tools/__init__.py | 15 - scdata/tools/{out.py => custom_logger.py} | 5 +- scdata/tools/find.py | 2 +- scdata/tools/{headers.py => gets.py} | 22 + scdata/tools/lazy.py | 2 +- scdata/tools/location.py | 2 +- scdata/tools/meta.py | 235 ----------- scdata/tools/units.py | 80 +++- 42 files changed, 493 insertions(+), 489 deletions(-) create mode 100644 scdata/_config/custom_logger.py rename scdata/tools/{out.py => custom_logger.py} (94%) rename scdata/tools/{headers.py => gets.py} (66%) delete mode 100644 scdata/tools/meta.py diff --git a/scdata/_config/__init__.py b/scdata/_config/__init__.py index 073b33b0..4277d20d 100644 --- a/scdata/_config/__init__.py +++ b/scdata/_config/__init__.py @@ -1,3 +1,2 @@ from .config import Config - config = Config() \ No newline at end of file diff --git a/scdata/_config/config.py b/scdata/_config/config.py index 50618529..54c1a2cd 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -1,25 +1,31 @@ import yaml import json - -from scdata.tools.dictmerge import dict_fmerge -from scdata.tools.meta import (get_paths, load_blueprints, - load_calibrations, load_connectors, - load_env, load_names) - from os import pardir, 
environ from os.path import join, abspath, dirname, exists import sys - from math import inf from numpy import array - import logging +from os import pardir, environ, name, makedirs +from os.path import join, dirname, expanduser, exists, basename +from urllib.parse import urlparse +import os +from shutil import copyfile +from requests import get +# from traceback import print_exc +import json +from pydantic import TypeAdapter +from typing import List -class Config(object): - log_level = logging.INFO +from scdata.models import Name, Blueprint, Metric +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.gets import get_json_from_url - # Timestamp for log output - _timestamp = True +class Config(object): + ### --------------------------------------- + ### ---------------LOG-LEVEL--------------- + ### --------------------------------------- + _log_level = logging.INFO # Framework option # For renderer plots and config files @@ -27,15 +33,11 @@ class Config(object): # - 'script': no plots in jupyter, updates config # - 'jupyterlab': for plots, updates config # - 'chupiflow': no plots in jupyter, does not update config - framework = 'script' + _framework = 'script' if 'IPython' in sys.modules: _ipython_avail = True else: _ipython_avail = False - # Default timezone - _timezone = 'Europe/Madrid' - _epsg = 4326 - # Returns when iterables cannot be fully processed _strict = False @@ -50,9 +52,6 @@ class Config(object): ### -----------------DATA------------------ ### --------------------------------------- - ## Place here options for data load and handling - _combined_devices_name = 'COMBINED_DEVICES' - data = { # Whether or not to reload metadata from git repo 'reload_metadata': True, @@ -68,15 +67,9 @@ class Config(object): 'strict_load': False } - # If using multiple training datasets, how to call the joint df - _name_multiple_training_data = 'CDEV' - # Maximum amount of points to load when postprocessing data _max_load_amount = 500 - # Ignore duplicate sensor ids - _sc_ignore_keys = ['DALLAS_TEMP', 'GB_TEMP', 'GB_HUM'] - # Ingore Nas when loading data (for now only in CSVs) # Similar to na_values in https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html _ignore_na_values = [' nan'] @@ -85,6 +78,7 @@ class Config(object): ### --------------ALGORITHMS--------------- ### --------------------------------------- + ## TODO - move out from here # Whether or not to plot intermediate debugging visualisations in the algorithms _intermediate_plots = False @@ -117,12 +111,12 @@ class Config(object): # # f'{_base_postprocessing_url}blueprints/parrot_soil.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sc_20_station_iscape.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sc_21_station_iscape.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sc_21_station_module.{_default_file_type}', + # f'{_base_postprocessing_url}blueprints/sc_21_station_module.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sck.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sck_15.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sck_20.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sc_air_api.{_default_file_type}', - f'{_base_postprocessing_url}blueprints/sc_air_csv.{_default_file_type}', + f'{_base_postprocessing_url}blueprints/sc_air.{_default_file_type}', + f'{_base_postprocessing_url}blueprints/sc_water.{_default_file_type}', # 
f'{_base_postprocessing_url}blueprints/sck_21_sps30.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sck_21_sen5x.{_default_file_type}', # f'{_base_postprocessing_url}blueprints/sck_21_gps.{_default_file_type}', @@ -131,28 +125,16 @@ class Config(object): # f'{_base_postprocessing_url}blueprints/sc_21_water.{_default_file_type}' ] - connectors_urls = [ - f'{_base_postprocessing_url}connectors/nilu.{_default_file_type}' - ] + # connectors_urls = [ + # f'{_base_postprocessing_url}connectors/nilu.{_default_file_type}' + # ] names_urls = [ + # Revert to base postprocessing url # f'{_base_postprocessing_url}names/SCDevice.json' 'https://raw.githubusercontent.com/fablabbcn/smartcitizen-data/enhacement/flexible-handlers/names/SCDevice.json' ] - # Convertion table from API SC to Pandas - # https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags - # https://developer.smartcitizen.me/#get-historical-readings - _freq_conv_lut = ( - ['y','A'], - ['M','M'], - ['w','W'], - ['d','D'], - ['h','H'], - ['m','Min'], - ['s','S'], - ['ms','ms'] - ) ### --------------------------------------- ### -------------METRICS DATA-------------- @@ -164,89 +146,9 @@ class Config(object): 'GPS_HDOP': [-inf, 0, 40, 80, 120, 160, 200, 240, 260, 300, inf] } - _as_sensor_codes = { - '132': 'ASA4_CO', - '133': 'ASA4_H2S', - '130': 'ASA4_NO', - '212': 'ASA4_NO2', - '214': 'ASA4_OX', - '134': 'ASA4_SO2', - '162': 'ASB4_CO', - '133': 'ASB4_H2S',# - '130': 'ASB4_NO', # - '202': 'ASB4_NO2', - '204': 'ASB4_OX', - '164': 'ASB4_SO2' - } - - # Alphasense temperature channels (in order of priority) - _as_temp_channel = [ - "ASPT1000", - "SHT31_EXT_TEMP", - "SHT35_EXT_TEMP", - "PM_DALLAS_TEMP", - ] _channel_bin_n = 11 - # Molecular weights of certain pollutants for unit convertion - _molecular_weights = { - 'CO': 28, - 'NO': 30, - 'NO2': 46, - 'O3': 48, - 'C6H6': 78, - 'SO2': 64, - 'H2S': 34 - } - - # This look-up table is comprised of channels you want always want to have with the same units and that might come from different sources - # i.e. pollutant data in various units (ppm or ug/m3) from different analysers - # The table should be used as follows: - # 'key': 'units', - # - 'key' is the channel that will lately be used in the analysis. 
It supports regex - # - target_unit is the unit you want this channel to be and that will be converted in case of it being found in the channels list of your source - - _channel_lut = { - "TEMP": "degC", - "HUM": "%rh", - "PRESS": "kPa", - "PM_(\d|[A,B]_\d)": "ug/m3", - "^CO2": "ppm", - "^CO": "ppb", # Always start with CO - "NOISE_A": "dBA", - "NO\Z": "ppb", - "NO2": "ppb", - "NOX": "ppb", - "O3": "ppb", - "C6H6": "ppb", - "H2S": "ppb", - "SO2": "ppb", - "CO2": "ppm" - } - - # This table is used to convert units - # ['from_unit', 'to_unit', 'multiplicative_factor', 'requires_M'] - # - 'from_unit'/'to_unit' = 'multiplicative_factor' - # - 'requires_M' = whether it - # It accepts reverse operations - you don't need to put them twice but in reverse - - _unit_convertion_lut = ( - ['%rh', '%', 1, False], - ['ºC', 'degC', 1, False], - ['ppm', 'ppb', 1000, False], - ['mg/m3', 'ug/m3', 1000, False], - ['mgm3', 'ugm3', 1000, False], - ['mg/m3', 'ppm', 24.45, True], - ['mgm3', 'ppm', 24.45, True], - ['ug/m3', 'ppb', 24.45, True], - ['ugm3', 'ppb', 24.45, True], - ['mg/m3', 'ppb', 1000*24.45, True], - ['mgm3', 'ppb', 1000*24.45, True], - ['ug/m3', 'ppm', 1./1000*24.45, True], - ['ugm3', 'ppm', 1./1000*24.45, True] - ) - ### --------------------------------------- ### ----------------PLOTS------------------ ### --------------------------------------- @@ -279,7 +181,6 @@ class Config(object): '#d1e5f0','#fddbc7','#f4a582','#d6604d', '#b2182b','#67001f']) - _plot_style = "seaborn-whitegrid" _ts_plot_def_fmt = { @@ -522,42 +423,26 @@ class Config(object): } ### --------------------------------------- - ### ----------------DISCARD---------------- + ### ----------------CSV-------------------- ### --------------------------------------- - _discvars = [ - 'readings', - 'api_device', - 'options', - 'loaded', - 'hw_id', - 'blueprint_url', - 'hardware_url', - 'processed', - 'forwarding_params', - 'meta', - 'processed', - 'postprocessing_info', - 'hw_updated_at', - 'description', - 'latest_postprocessing', - 'blueprint_loaded_from_url', - 'hardware_loaded_from_url', - 'data' - ] - _csv_defaults = { 'index_name': 'TIME', 'sep': ',', 'skiprows': None } + ### --------------------------------------- + ### --------------------------------------- + ### --------------------------------------- + def __init__(self): - self._env_file = False - self.paths = get_paths() + self._env_file = None + self.paths = self.get_paths() self.load() self.get_meta_data() + def __getattr__(self, name): try: return self[name] @@ -575,13 +460,209 @@ def __getitem__(self, key): def __iter__(self): return (i for i in dir(self)) + def load_env(self): + with open(self._env_file) as f: + for line in f: + # Ignore empty lines or lines that start with # + if line.startswith('#') or not line.strip(): + continue + # Load to local environ + key, value = line.strip().split('=', 1) + environ[key] = value + + def load_calibrations(self, urls): + ''' + Loads calibrations from urls. + The calibrations are meant for alphasense's 4 electrode sensors. The files contains: + { + "162031254": { + "ae_electronic_zero_mv": "", + "ae_sensor_zero_mv": "-16.64", + "ae_total_zero_mv": "", + "pcb_gain_mv_na": "0.8", + "we_cross_sensitivity_no2_mv_ppb": "0", + "we_cross_sensitivity_no2_na_ppb": "0", + "we_electronic_zero_mv": "", + "we_sensitivity_mv_ppb": "0.45463999999999993", + "we_sensitivity_na_ppb": "0.5682999999999999", + "we_sensor_zero_mv": "-27.200000000000003", + "we_total_zero_mv": "" + }, + ... 
+ } + Parameters + ---------- + urls: [String] + json file urls + Returns + --------- + Dictionary containing calibrations otherwise None + ''' + + calibrations = dict() + for url in urls: + try: + rjson, _ = get_json_from_url(url) + calibrations = dict_fmerge(rjson, calibrations) + except: + print(f'Problem loading calibrations from {url}') + return None + + return calibrations + + # def load_connectors(self, urls): + # connectors = dict() + # for url in urls: + # try: + # c = get_json_from_url(url) + # _nc = basename(urlparse(str(url)).path).split('.')[0] + # connectors[_nc] = c + # except: + # print(f'Problem loading connectors from {url}') + # print_exc() + # return None + + # return connectors + + def load_blueprints(self, urls): + blueprints = dict() + for url in urls: + if url is None: continue + _nblueprint = basename(urlparse(str(url)).path).split('.')[0] + rjson, _ = get_json_from_url(url) + + if rjson is None: + continue + if _nblueprint not in blueprints: + blueprints[_nblueprint] = TypeAdapter(Blueprint).validate_python(rjson).dict() + + return blueprints + + def load_names(self, urls): + isn = True + names = dict() + + for url in urls: + result = list() + _nc = basename(urlparse(str(url)).path).split('.')[0] + + while isn: + try: + rjson, rheaders = get_json_from_url(url) + result += TypeAdapter(List[Name]).validate_python(rjson) + except: + isn = False + pass + else: + if 'next' in rheaders: + if rheaders['next'] == url: isn = False + elif rheaders['next'] != url: url = rheaders['next'] + else: + isn = False + names[_nc] = result + + return names + + def get_paths(self): + + # Check if windows + _mswin = name == "nt" + # Get user_home + _user_home = expanduser("~") + + # Get .config dir + if _mswin: + _cdir = environ["APPDATA"] + elif 'XDG_CONFIG_HOME' in environ: + _cdir = environ['XDG_CONFIG_HOME'] + else: + _cdir = join(expanduser("~"), '.config') + + # Get .cache dir - maybe change it if found in config.json + if _mswin: + _ddir = environ["APPDATA"] + elif 'XDG_CACHE_HOME' in environ: + _ddir = environ['XDG_CACHE_HOME'] + else: + _ddir = join(expanduser("~"), '.cache') + + # Set config and cache (data) dirs + _sccdir = join(_cdir, 'scdata') + _scddir = join(_ddir, 'scdata') + + makedirs(_sccdir, exist_ok=True) + makedirs(_scddir, exist_ok=True) + + _paths = dict() + + _paths['config'] = _sccdir + _paths['data'] = _scddir + + # Auxiliary folders + + # - Processed data + _paths['processed'] = join(_paths['data'], 'processed') + makedirs(_paths['processed'], exist_ok=True) + + # - Internal data: blueprints and calibrations + _paths['interim'] = join(_paths['data'], 'interim') + makedirs(_paths['interim'], exist_ok=True) + + # Check for blueprints and calibrations + # Find the path to the interim folder + _dir = dirname(__file__) + _idir = join(_dir, 'interim') + + # - Models and local tests + _paths['models'] = join(_paths['data'], 'models') + makedirs(_paths['models'], exist_ok=True) + + # - Exports + _paths['export'] = join(_paths['data'], 'export') + makedirs(_paths['export'], exist_ok=True) + + # - Raw + _paths['raw'] = join(_paths['data'], 'raw') + makedirs(_paths['raw'], exist_ok=True) + # Copy example csvs + _enames = ['example.csv', 'geodata.csv'] + for _ename in _enames: + s = join(_idir, _ename) + d = join(_paths['raw'], _ename) + if not exists(join(_paths['raw'], _ename)): copyfile(s, d) + + # - Reports + _paths['reports'] = join(_paths['data'], 'reports') + makedirs(_paths['reports'], exist_ok=True) + + # - Tasks + _paths['tasks'] = join(_paths['data'], 'tasks') 
+ makedirs(_paths['tasks'], exist_ok=True) + + # - Uploads + _paths['uploads'] = join(_paths['data'], 'uploads') + makedirs(_paths['uploads'], exist_ok=True) + + # Check for uploads + _example_uploads = ['example_upload_1.json', 'example_zenodo_upload.yaml'] + _udir = join(_dir, 'uploads') + for item in _example_uploads: + s = join(_udir, item) + d = join(_paths['uploads'], item) + if not exists(d): copyfile(s, d) + + # Inventory (normally not used by user) + _paths['inventory'] = '' + + return _paths + def get_meta_data(self): """ Get meta data from blueprints and _calibrations """ - # (re)load calibrations + # Load blueprints, calibrations and names bppath = join(self.paths['interim'], 'blueprints.json') if self.data['reload_metadata'] or not exists(bppath): - blueprints = load_blueprints(self.blueprints_urls) + blueprints = self.load_blueprints(self.blueprints_urls) bpreload = True else: with open(bppath, 'r') as file: blueprints = json.load(file) @@ -589,59 +670,53 @@ def get_meta_data(self): calpath = join(self.paths['interim'], 'calibrations.json') if self.data['reload_metadata'] or not exists(calpath): - calibrations = load_calibrations(self.calibrations_urls) + calibrations = self.load_calibrations(self.calibrations_urls) calreload = True else: with open(calpath, 'r') as file: calibrations = json.load(file) calreload = False - conpath = join(self.paths['interim'], 'connectors.json') - if self.data['reload_metadata'] or not exists(conpath): - connectors = load_connectors(self.connectors_urls) - conreload = True - else: - with open(conpath, 'r') as file: connectors = json.load(file) - conreload = False - namespath = join(self.paths['interim'], 'names.json') if self.data['reload_metadata'] or not exists(namespath): - names = load_names(self.names_urls) + names = self.load_names(self.names_urls) namesreload = True - # else: - # TODO Implement load of serialised model - # with open(namespath, 'r') as file: names = json.load(file) - # namesreload = False - + else: + names = dict() + with open(namespath, 'r') as file: + names_load = json.load(file) + for item in names_load: + names[item] = TypeAdapter(List[Name]).validate_python(names_load[item]) + namesreload = False + + # Dump blueprints, calibrations and names if blueprints is not None: self.blueprints = blueprints if bpreload: - with open(bppath, 'w') as file: json.dump(blueprints, file) + with open(bppath, 'w') as file: + json.dump(blueprints, file) + if calibrations is not None: self.calibrations = calibrations if calreload: - with open(calpath, 'w') as file: json.dump(calibrations, file) - if connectors is not None: - self.connectors = connectors - if conreload: - with open(conpath, 'w') as file: json.dump(connectors, file) + with open(calpath, 'w') as file: + json.dump(calibrations, file) + if names is not None: self.names = names - # if namesreload: - # TODO Implement dump of serialised model - # with open(namespath, 'w') as file: json.dump(names, file) + if namesreload: + for item in self.names: + names_dump = {item: [name.model_dump() for name in self.names[item]]} + with open(namespath, 'w') as file: + json.dump(names_dump, file) # Find environment file in root or in scdata/ for clones if exists(join(self.paths['data'],'.env')): - env_file = join(self.paths['data'],'.env') + self._env_file = join(self.paths['data'],'.env') + print(f'Found Environment file at: {self._env_file}') + self.load_env() else: print(f'No environment file found. 
If you had an environment file (.env) before, make sure its now here') print(join(self.paths['data'],'.env')) - env_file = None - - # Load .env for tokens and stuff if found - if env_file is not None and not self._env_file: - print(f'Found Environment file at: {env_file}') - if load_env(env_file): self._env_file = True def load(self): """ Override config if config file exists. """ @@ -659,7 +734,8 @@ def load(self): except KeyError: # Ignore unrecognised data in config print ("Unrecognised config item: %s", k) - if self.framework != 'chupiflow': + + if self._framework != 'chupiflow': self.save() def save(self): diff --git a/scdata/_config/custom_logger.py b/scdata/_config/custom_logger.py new file mode 100644 index 00000000..d9cc0863 --- /dev/null +++ b/scdata/_config/custom_logger.py @@ -0,0 +1,40 @@ + +from termcolor import colored +from scdata._config import config +from datetime import datetime +import sys + +import logging + +class CutsomLoggingFormatter(logging.Formatter): + + grey = "\x1b[38;20m" + yellow = "\x1b[33;20m" + red = "\x1b[31;20m" + bold_red = "\x1b[31;1m" + reset = "\x1b[0m" + format_min = "[%(asctime)s] - %(name)s - %(levelname)s - %(message)s" + format_deb = "[%(asctime)s] - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" + + FORMATS = { + logging.DEBUG: grey + format_min + reset, + logging.INFO: grey + format_min + reset, + logging.WARNING: yellow + format_min + reset, + logging.ERROR: red + format_deb + reset, + logging.CRITICAL: bold_red + format_deb + reset + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno) + formatter = logging.Formatter(log_fmt) + return formatter.format(record) + +logger = logging.getLogger('scdata') +# logger.setLevel(config.log_level) +# ch = logging.StreamHandler(sys.stdout) +# ch.setLevel(config.log_level) +# ch.setFormatter(CutsomLoggingFormatter()) +# logger.addHandler(ch) + +def set_logger_level(level=logging.DEBUG): + logger.setLevel(level) diff --git a/scdata/device/device.py b/scdata/device/device.py index 3b0b6bad..03bb550d 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -1,10 +1,13 @@ ''' Main implementation of class Device ''' -from scdata.tools import logger, localise_date, \ - dict_fmerge, get_units_convf +from scdata.tools.custom_logger import logger from scdata.io import read_csv_file, export_csv_file -from scdata.tools import LazyCallable, url_checker, \ - get_json_from_url, find_by_field +from scdata.tools.lazy import LazyCallable +from scdata.tools.url_check import url_checker +from scdata.tools.date import localise_date +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.units import get_units_convf +from scdata.tools.find import find_by_field from scdata._config import config from scdata.io.device_api import * from scdata.models import Blueprint, Metric, Source, APIParams, FileParams, DeviceOptions, Sensor diff --git a/scdata/device/process/__init__.py b/scdata/device/process/__init__.py index 8fcd9ac4..12ca3a57 100644 --- a/scdata/device/process/__init__.py +++ b/scdata/device/process/__init__.py @@ -1,6 +1,6 @@ ''' Implementation of different processes to be done in each device ''' -from scdata.tools import LazyCallable +from scdata.tools.lazy import LazyCallable from .formulae import absolute_humidity, exp_f, fit_exp_f from .geoseries import is_within_circle from .timeseries import clean_ts, merge_ts, rolling_avg, poly_ts, geo_located, time_derivative, delta_index_ts diff --git a/scdata/device/process/alphasense.py 
b/scdata/device/process/alphasense.py index 317bf184..72cc9782 100644 --- a/scdata/device/process/alphasense.py +++ b/scdata/device/process/alphasense.py @@ -1,4 +1,6 @@ -from scdata.tools import logger, get_units_convf, find_dates, localise_date +from scdata.tools.custom_logger import logger +from scdata.tools.units import get_units_convf +from scdata.tools.date import find_dates, localise_date from scdata._config import config from scdata.device.process.params import * from scdata.device.process import baseline_calc, clean_ts @@ -6,6 +8,30 @@ import matplotlib.pyplot as plt from pandas import date_range, DataFrame, Series, isnull +# Alphasense sensor codes +alphasense_sensor_codes = { + '132': 'ASA4_CO', + '133': 'ASA4_H2S', + '130': 'ASA4_NO', + '212': 'ASA4_NO2', + '214': 'ASA4_OX', + '134': 'ASA4_SO2', + '162': 'ASB4_CO', + '133': 'ASB4_H2S',# + '130': 'ASB4_NO', # + '202': 'ASB4_NO2', + '204': 'ASB4_OX', + '164': 'ASB4_SO2' +} + +# Alphasense temperature channels (in order of priority) +alphasense_temp_channel = [ + "ASPT1000", + "SHT31_EXT_TEMP", + "SHT35_EXT_TEMP", + "PM_DALLAS_TEMP", +] + def alphasense_803_04(dataframe, **kwargs): """ Calculates pollutant concentration based on 4 electrode sensor readings (mV) @@ -71,7 +97,7 @@ def comp_t(x, comp_lut): df = dataframe.copy() # Get sensor type - as_type = config._as_sensor_codes[kwargs['alphasense_id'][0:3]] + as_type = alphasense_sensor_codes[kwargs['alphasense_id'][0:3]] # Use alternative method or not if 'use_alternative' not in kwargs: kwargs['use_alternative'] = False @@ -148,7 +174,7 @@ def ec_sensor_temp(dataframe, **kwargs): """ if 'priority' in kwargs: if kwargs['priority'] in dataframe.columns: return dataframe[kwargs['priority']] - for option in config._as_temp_channel: + for option in alphasense_temp_channel: if option in dataframe.columns: return dataframe[option] logger.error('Problem with input data') return None diff --git a/scdata/device/process/baseline.py b/scdata/device/process/baseline.py index 50d2292b..0ca0d077 100644 --- a/scdata/device/process/baseline.py +++ b/scdata/device/process/baseline.py @@ -7,7 +7,7 @@ from numpy import max as npmax from numpy import abs as npabs from numpy import argmax, argmin, arange, exp -from scdata.tools import logger +from scdata.tools.custom_logger import logger from scdata._config import config from math import isnan from .formulae import exp_f diff --git a/scdata/device/process/regression.py b/scdata/device/process/regression.py index 0334c135..dd30b86e 100644 --- a/scdata/device/process/regression.py +++ b/scdata/device/process/regression.py @@ -1,5 +1,7 @@ from scdata._config import config -from scdata.tools import logger, dict_fmerge, clean +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.cleaning import clean from pandas import DataFrame from numpy import array diff --git a/scdata/device/process/timeseries.py b/scdata/device/process/timeseries.py index 1a8e0530..168836e8 100644 --- a/scdata/device/process/timeseries.py +++ b/scdata/device/process/timeseries.py @@ -1,7 +1,7 @@ from numpy import nan, full, power, ones, diff, convolve, append from scipy import ndimage from scdata.device.process import is_within_circle -from scdata.tools import logger +from scdata.tools.custom_logger import logger def delta_index_ts(dataframe, **kwargs): result = dataframe.index.to_series().diff().astype('timedelta64[s]') diff --git a/scdata/io/device_api.py b/scdata/io/device_api.py index 827def8b..8ff17a9d 100644 --- 
a/scdata/io/device_api.py +++ b/scdata/io/device_api.py @@ -10,7 +10,12 @@ from geopy.distance import distance from scdata._config import config -from scdata.tools import logger, localise_date, clean, get_elevation, url_checker, process_headers +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.date import localise_date +from scdata.tools.cleaning import clean +from scdata.tools.url_check import url_checker +from scdata.tools.gets import process_headers # from tzwhere import tzwhere from timezonefinder import TimezoneFinder from datetime import date, datetime diff --git a/scdata/io/device_file.py b/scdata/io/device_file.py index e3d546ea..e08e8020 100644 --- a/scdata/io/device_file.py +++ b/scdata/io/device_file.py @@ -1,6 +1,8 @@ from os import makedirs, listdir from os.path import exists, join, splitext -from scdata.tools import logger, localise_date, clean +from scdata.tools.custom_logger import logger +from scdata.tools.date import localise_date +from scdata.tools.cleaning import clean from pandas import read_csv, to_datetime, DataFrame from scdata._config import config import csv diff --git a/scdata/io/model.py b/scdata/io/model.py index c0651bf6..c61ed9d1 100644 --- a/scdata/io/model.py +++ b/scdata/io/model.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger from joblib import dump, load from scdata._config import config from os.path import join, exists diff --git a/scdata/test/checks/checks.py b/scdata/test/checks/checks.py index 787cf766..2bb8d669 100644 --- a/scdata/test/checks/checks.py +++ b/scdata/test/checks/checks.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger import matplotlib.pyplot as plt import missingno as msno from pandas import to_datetime, DataFrame diff --git a/scdata/test/dispersion/dispersion.py b/scdata/test/dispersion/dispersion.py index b693e3f4..764afcd3 100644 --- a/scdata/test/dispersion/dispersion.py +++ b/scdata/test/dispersion/dispersion.py @@ -1,4 +1,5 @@ -from scdata.tools import logger, localise_date +from scdata.tools.custom_logger import logger +from scdata.tools.date import localise_date from pandas import DataFrame from scdata._config import config diff --git a/scdata/test/export/to_file.py b/scdata/test/export/to_file.py index 8dde9a79..7fafd458 100755 --- a/scdata/test/export/to_file.py +++ b/scdata/test/export/to_file.py @@ -2,7 +2,7 @@ from os.path import join, dirname, exists from os import makedirs -from scdata.tools import logger +from scdata.tools.custom_logger import logger import flask from re import sub diff --git a/scdata/test/plot/box_plot.py b/scdata/test/plot/box_plot.py index 564c36cc..27a83e02 100644 --- a/scdata/test/plot/box_plot.py +++ b/scdata/test/plot/box_plot.py @@ -3,7 +3,8 @@ from matplotlib import style from seaborn import set_palette, boxplot # import seaborn as sns -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge from scdata._config import config from .plot_tools import prepare_data, groupby_session diff --git a/scdata/test/plot/heatmap_iplot.py b/scdata/test/plot/heatmap_iplot.py index 3cd94c4c..6d6d9453 100644 --- a/scdata/test/plot/heatmap_iplot.py +++ b/scdata/test/plot/heatmap_iplot.py @@ -1,5 +1,6 @@ from plotly.graph_objs import Heatmap, Layout, Figure -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from 
scdata.tools.dictmerge import dict_fmerge from scdata._config import config from .plot_tools import prepare_data, groupby_session from plotly.offline import iplot diff --git a/scdata/test/plot/heatmap_plot.py b/scdata/test/plot/heatmap_plot.py index 9d8387e4..d24f392f 100644 --- a/scdata/test/plot/heatmap_plot.py +++ b/scdata/test/plot/heatmap_plot.py @@ -2,7 +2,8 @@ from matplotlib import rcParams from matplotlib import style from seaborn import set_palette, heatmap -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge from scdata._config import config from .plot_tools import prepare_data, groupby_session diff --git a/scdata/test/plot/maps.py b/scdata/test/plot/maps.py index ed4b159f..9ec2d268 100644 --- a/scdata/test/plot/maps.py +++ b/scdata/test/plot/maps.py @@ -7,7 +7,9 @@ from math import isnan, floor, ceil from traceback import print_exc from pandas import cut, date_range -from scdata.tools import dict_fmerge, clean, logger +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.cleaning import clean +from scdata.tools.custom_logger import logger from scdata._config import config from numpy import linspace, nan from branca import element diff --git a/scdata/test/plot/plot_tools.py b/scdata/test/plot/plot_tools.py index 3cacb793..7b57c510 100644 --- a/scdata/test/plot/plot_tools.py +++ b/scdata/test/plot/plot_tools.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger from numpy import arange from pandas import cut, DataFrame, to_datetime, option_context, to_numeric import io diff --git a/scdata/test/plot/scatter_dispersion_grid.py b/scdata/test/plot/scatter_dispersion_grid.py index 003a18ae..d5a6cf5e 100644 --- a/scdata/test/plot/scatter_dispersion_grid.py +++ b/scdata/test/plot/scatter_dispersion_grid.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger from scdata._config import config import matplotlib.pyplot as plt import matplotlib.cm as cm diff --git a/scdata/test/plot/scatter_iplot.py b/scdata/test/plot/scatter_iplot.py index 18c6ba08..d2c29e2d 100644 --- a/scdata/test/plot/scatter_iplot.py +++ b/scdata/test/plot/scatter_iplot.py @@ -1,4 +1,5 @@ -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge from .scatter_plot import scatter_plot from scdata._config import config from plotly.io import renderers diff --git a/scdata/test/plot/scatter_plot.py b/scdata/test/plot/scatter_plot.py index c76cada2..5272bb6e 100644 --- a/scdata/test/plot/scatter_plot.py +++ b/scdata/test/plot/scatter_plot.py @@ -2,7 +2,8 @@ from matplotlib import rcParams from matplotlib import style from seaborn import set_palette, regplot, scatterplot, relplot -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge from scdata._config import config from .plot_tools import prepare_data, colors from numpy import array diff --git a/scdata/test/plot/ts_dendrogram.py b/scdata/test/plot/ts_dendrogram.py index 190e24e1..27894efa 100644 --- a/scdata/test/plot/ts_dendrogram.py +++ b/scdata/test/plot/ts_dendrogram.py @@ -1,6 +1,8 @@ from scipy.cluster import hierarchy as hc from pandas import DataFrame -from scdata.tools import logger, dict_fmerge, clean +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge +from 
scdata.tools.cleaning import clean from scdata._config import config import matplotlib.pyplot as plt from matplotlib import rcParams diff --git a/scdata/test/plot/ts_dispersion_grid.py b/scdata/test/plot/ts_dispersion_grid.py index 308c258e..b6ccfdf2 100644 --- a/scdata/test/plot/ts_dispersion_grid.py +++ b/scdata/test/plot/ts_dispersion_grid.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger from scdata._config import config import matplotlib.pyplot as plt import matplotlib.cm as cm diff --git a/scdata/test/plot/ts_dispersion_plot.py b/scdata/test/plot/ts_dispersion_plot.py index ea7c6edc..22d19f1e 100644 --- a/scdata/test/plot/ts_dispersion_plot.py +++ b/scdata/test/plot/ts_dispersion_plot.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger from scdata._config import config import matplotlib.pyplot as plt import matplotlib.colors diff --git a/scdata/test/plot/ts_dispersion_uplot.py b/scdata/test/plot/ts_dispersion_uplot.py index 41dddcfc..5b33f17d 100644 --- a/scdata/test/plot/ts_dispersion_uplot.py +++ b/scdata/test/plot/ts_dispersion_uplot.py @@ -1,4 +1,6 @@ -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge + from scdata._config import config from .plot_tools import colors from scipy.stats import t diff --git a/scdata/test/plot/ts_iplot.py b/scdata/test/plot/ts_iplot.py index fcc24670..f71aeead 100644 --- a/scdata/test/plot/ts_iplot.py +++ b/scdata/test/plot/ts_iplot.py @@ -1,4 +1,5 @@ -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge from scdata._config import config from .plot_tools import prepare_data diff --git a/scdata/test/plot/ts_plot.py b/scdata/test/plot/ts_plot.py index a0b040a7..3972e7cf 100644 --- a/scdata/test/plot/ts_plot.py +++ b/scdata/test/plot/ts_plot.py @@ -1,4 +1,6 @@ -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge + from scdata._config import config from .plot_tools import prepare_data from pandas import to_datetime diff --git a/scdata/test/plot/ts_scatter.py b/scdata/test/plot/ts_scatter.py index a8034662..ea3394bf 100644 --- a/scdata/test/plot/ts_scatter.py +++ b/scdata/test/plot/ts_scatter.py @@ -1,4 +1,5 @@ -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge from scdata._config import config from .plot_tools import prepare_data diff --git a/scdata/test/plot/ts_uplot.py b/scdata/test/plot/ts_uplot.py index 3fe57a07..ddd455ab 100644 --- a/scdata/test/plot/ts_uplot.py +++ b/scdata/test/plot/ts_uplot.py @@ -1,4 +1,6 @@ -from scdata.tools import logger, dict_fmerge +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge + from scdata._config import config from .plot_tools import prepare_data, colors diff --git a/scdata/test/test.py b/scdata/test/test.py index b91f9b73..3e763612 100644 --- a/scdata/test/test.py +++ b/scdata/test/test.py @@ -13,10 +13,12 @@ from pydantic import TypeAdapter, BaseModel, ConfigDict, model_serializer from typing import Optional, List, Dict, Any -from scdata.tools import logger, localise_date, find_by_field +from scdata.tools.custom_logger import logger +from scdata.tools.date import localise_date +from scdata.tools.find import find_by_field from 
scdata.io import read_csv_file, export_csv_file from scdata._config import config -from scdata.device import Device +from scdata import Device from scdata.models import TestOptions#, TestResult class Test(BaseModel): diff --git a/scdata/test/tools/combine.py b/scdata/test/tools/combine.py index c98a9a75..4f7e4f79 100644 --- a/scdata/test/tools/combine.py +++ b/scdata/test/tools/combine.py @@ -1,5 +1,5 @@ from pandas import DataFrame -from scdata.tools import logger +from scdata.tools.custom_logger import logger from scdata.device import Device def combine(self, devices = None, readings = None): diff --git a/scdata/test/tools/prepare.py b/scdata/test/tools/prepare.py index 99ea529f..db032370 100644 --- a/scdata/test/tools/prepare.py +++ b/scdata/test/tools/prepare.py @@ -1,4 +1,6 @@ -from scdata.tools import logger, dict_fmerge, clean +from scdata.tools.custom_logger import logger +from scdata.tools.dictmerge import dict_fmerge +from scdata.tools.cleaning import clean from scdata._config import config from numpy import array from pandas import DataFrame diff --git a/scdata/tools/__init__.py b/scdata/tools/__init__.py index c02fd9db..e69de29b 100644 --- a/scdata/tools/__init__.py +++ b/scdata/tools/__init__.py @@ -1,15 +0,0 @@ -from .out import logger, set_logger_level -from .date import localise_date, find_dates -from .units import get_units_convf -from .dictmerge import dict_fmerge -from .lazy import LazyCallable -from .meta import get_current_blueprints, load_blueprints, get_json_from_url, load_names -from .stats import spearman, get_metrics -from .cleaning import clean -from .location import get_elevation -from .url_check import url_checker -from .headers import process_headers -from .find import find_by_field -# from .other.manage_post_info import create_post_info -# from .zenodo import zenodo_upload -# from .report import include_footer diff --git a/scdata/tools/out.py b/scdata/tools/custom_logger.py similarity index 94% rename from scdata/tools/out.py rename to scdata/tools/custom_logger.py index 68c2cb67..5ad121b3 100644 --- a/scdata/tools/out.py +++ b/scdata/tools/custom_logger.py @@ -2,7 +2,6 @@ from scdata._config import config from datetime import datetime import sys - import logging class CutsomLoggingFormatter(logging.Formatter): @@ -29,9 +28,9 @@ def format(self, record): return formatter.format(record) logger = logging.getLogger('scdata') -logger.setLevel(config.log_level) +logger.setLevel(config._log_level) ch = logging.StreamHandler(sys.stdout) -ch.setLevel(config.log_level) +ch.setLevel(config._log_level) ch.setFormatter(CutsomLoggingFormatter()) logger.addHandler(ch) diff --git a/scdata/tools/find.py b/scdata/tools/find.py index 7a02fea8..695818f0 100644 --- a/scdata/tools/find.py +++ b/scdata/tools/find.py @@ -1,4 +1,4 @@ -from scdata.tools import logger +from scdata.tools.custom_logger import logger def find_by_field(models, value, field): try: diff --git a/scdata/tools/headers.py b/scdata/tools/gets.py similarity index 66% rename from scdata/tools/headers.py rename to scdata/tools/gets.py index e72fada0..8cd951a1 100644 --- a/scdata/tools/headers.py +++ b/scdata/tools/gets.py @@ -1,3 +1,5 @@ +from requests import get + def process_headers(headers): result = {} if 'total' in headers: result['total_pages'] = headers['total'] @@ -16,3 +18,23 @@ def process_headers(headers): elif which == 'first': result['first'] = chunk[0].strip('<').strip('>') return result + +def get_json_from_url(url): + + # Gets a json from an url and returns it as a dict + rjson = None + rheaders = 
None + try: + r = get(url) + r.raise_for_status() + rjson = r.json() + rheaders = process_headers(r.headers) + except ConnectionError: + print (f'Failed request. Response {r.status_code}') + pass + except: + pass + + return rjson, rheaders + + diff --git a/scdata/tools/lazy.py b/scdata/tools/lazy.py index 7273a7f2..19d7c215 100644 --- a/scdata/tools/lazy.py +++ b/scdata/tools/lazy.py @@ -1,5 +1,5 @@ import sys -from .out import logger +from .custom_logger import logger class LazyCallable(object): ''' diff --git a/scdata/tools/location.py b/scdata/tools/location.py index 2e76a1b1..0f1dd880 100644 --- a/scdata/tools/location.py +++ b/scdata/tools/location.py @@ -1,6 +1,6 @@ from requests import get from pandas import json_normalize -from scdata.tools import logger +from scdata.tools.custom_logger import logger from scdata._config import config def get_elevation(_lat = None, _long = None): diff --git a/scdata/tools/meta.py b/scdata/tools/meta.py deleted file mode 100644 index 66aa940f..00000000 --- a/scdata/tools/meta.py +++ /dev/null @@ -1,235 +0,0 @@ -from .dictmerge import dict_fmerge -from os import pardir, environ, name, makedirs -from os.path import join, dirname, expanduser, exists, basename -from urllib.parse import urlparse -import os -from shutil import copyfile -from requests import get -from traceback import print_exc -import json -from re import sub -from pydantic import TypeAdapter -from typing import List -from scdata.tools.headers import process_headers -from scdata.models import Name, Blueprint, Metric - -def get_paths(): - - # Check if windows - _mswin = name == "nt" - # Get user_home - _user_home = expanduser("~") - - # Get .config dir - if _mswin: - _cdir = environ["APPDATA"] - elif 'XDG_CONFIG_HOME' in environ: - _cdir = environ['XDG_CONFIG_HOME'] - else: - _cdir = join(expanduser("~"), '.config') - - # Get .cache dir - maybe change it if found in config.json - if _mswin: - _ddir = environ["APPDATA"] - elif 'XDG_CACHE_HOME' in environ: - _ddir = environ['XDG_CACHE_HOME'] - else: - _ddir = join(expanduser("~"), '.cache') - - # Set config and cache (data) dirs - _sccdir = join(_cdir, 'scdata') - _scddir = join(_ddir, 'scdata') - - makedirs(_sccdir, exist_ok=True) - makedirs(_scddir, exist_ok=True) - - _paths = dict() - - _paths['config'] = _sccdir - _paths['data'] = _scddir - - # Auxiliary folders - - # - Processed data - _paths['processed'] = join(_paths['data'], 'processed') - makedirs(_paths['processed'], exist_ok=True) - - # - Internal data: blueprints and calibrations - _paths['interim'] = join(_paths['data'], 'interim') - makedirs(_paths['interim'], exist_ok=True) - - # Check for blueprints and calibrations - # Find the path to the interim folder - _dir = dirname(__file__) - _idir = join(_dir, 'interim') - - # - Models and local tests - _paths['models'] = join(_paths['data'], 'models') - makedirs(_paths['models'], exist_ok=True) - - # - Exports - _paths['export'] = join(_paths['data'], 'export') - makedirs(_paths['export'], exist_ok=True) - - # - Raw - _paths['raw'] = join(_paths['data'], 'raw') - makedirs(_paths['raw'], exist_ok=True) - # Copy example csvs - _enames = ['example.csv', 'geodata.csv'] - for _ename in _enames: - s = join(_idir, _ename) - d = join(_paths['raw'], _ename) - if not exists(join(_paths['raw'], _ename)): copyfile(s, d) - - # - Reports - _paths['reports'] = join(_paths['data'], 'reports') - makedirs(_paths['reports'], exist_ok=True) - - # - Tasks - _paths['tasks'] = join(_paths['data'], 'tasks') - makedirs(_paths['tasks'], exist_ok=True) - - # - 
Uploads - _paths['uploads'] = join(_paths['data'], 'uploads') - makedirs(_paths['uploads'], exist_ok=True) - - # Check for uploads - _example_uploads = ['example_upload_1.json', 'example_zenodo_upload.yaml'] - _udir = join(_dir, 'uploads') - for item in _example_uploads: - s = join(_udir, item) - d = join(_paths['uploads'], item) - if not exists(d): copyfile(s, d) - - # Inventory (normally not used by user) - _paths['inventory'] = '' - - return _paths - -def load_env(env_file): - try: - with open(env_file) as f: - for line in f: - # Ignore empty lines or lines that start with # - if line.startswith('#') or not line.strip(): continue - # Load to local environ - key, value = line.strip().split('=', 1) - environ[key] = value - - except FileNotFoundError: - print('.env file not found') - return False - else: - return True - -def load_blueprints(urls): - blueprints = dict() - for url in urls: - if url is None: continue - _nblueprint = basename(urlparse(str(url)).path).split('.')[0] - _blueprint = get_json_from_url(url) - - if _nblueprint not in blueprints: - blueprints[_nblueprint] = TypeAdapter(Blueprint).validate_python(_blueprint).dict() - - return blueprints - -def get_current_blueprints(): - from scdata._config import config - if not config.is_init: config.get_meta_data() - - return list(config.blueprints.keys()) - -def get_json_from_url(url): - - rjson = None - # Gets a json from an url and returns it as a dict - try: - rget = get(url) - - if rget.status_code == 200 or rget.status_code == 201: - rjson = rget.json() - else: - print (f'Failed request. Response {rget.status_code}') - except: - print ('Failed request. Probably no connection or invalid json file') - pass - - return rjson - -def load_calibrations(urls): - ''' - Loads calibrations from urls. - The calibrations are meant for alphasense's 4 electrode sensors. The files contains: - { - "162031254": { - "ae_electronic_zero_mv": "", - "ae_sensor_zero_mv": "-16.64", - "ae_total_zero_mv": "", - "pcb_gain_mv_na": "0.8", - "we_cross_sensitivity_no2_mv_ppb": "0", - "we_cross_sensitivity_no2_na_ppb": "0", - "we_electronic_zero_mv": "", - "we_sensitivity_mv_ppb": "0.45463999999999993", - "we_sensitivity_na_ppb": "0.5682999999999999", - "we_sensor_zero_mv": "-27.200000000000003", - "we_total_zero_mv": "" - }, - ... 
- }
- Parameters
- ----------
- urls: [String]
- json file urls
- Returns
- ---------
- Dictionary containing calibrations otherwise None
- '''
-
- calibrations = dict()
- for url in urls:
- try:
- calibrations = dict_fmerge(get_json_from_url(url), calibrations)
- except:
- print(f'Problem loading calibrations from {url}')
- return None
-
- return calibrations
-
-def load_connectors(urls):
-
- connectors = dict()
- for url in urls:
- try:
- c = get_json_from_url(url)
- _nc = basename(urlparse(str(url)).path).split('.')[0]
- connectors[_nc] = c
- except:
- print(f'Problem loading connectors from {url}')
- print_exc()
- return None
-
- return connectors
-
-def load_names(urls):
- isn = True
- names = dict()
-
- for url in urls:
- result = list()
- _nc = basename(urlparse(str(url)).path).split('.')[0]
- while isn:
- r = get(url)
- r.raise_for_status()
- # If status code OK, retrieve data
- h = process_headers(r.headers)
- result += TypeAdapter(List[Name]).validate_python(r.json())
-
- if 'next' in h:
- if h['next'] == url: isn = False
- elif h['next'] != url: url = h['next']
- else:
- isn = False
- names[_nc] = result
-
- return names
diff --git a/scdata/tools/units.py b/scdata/tools/units.py
index f9d67aba..f4ef64d6 100644
--- a/scdata/tools/units.py
+++ b/scdata/tools/units.py
@@ -1,11 +1,69 @@
 from re import search
-from scdata.tools import logger
-from scdata._config import config
+from scdata.tools.custom_logger import logger
+
+# Molecular weights of certain pollutants for unit conversion
+molecular_weights = {
+ 'CO': 28,
+ 'NO': 30,
+ 'NO2': 46,
+ 'O3': 48,
+ 'C6H6': 78,
+ 'SO2': 64,
+ 'H2S': 34
+}
+
+# This look-up table contains channels you always want to have in the same units, and that might come from different sources
+# i.e. pollutant data in various units (ppm or ug/m3) from different analysers
+# The table should be used as follows:
+# 'key': 'units',
+# - 'key' is the channel that will later be used in the analysis. It supports regex
+# - 'units' is the target unit you want this channel to be in; the channel will be converted whenever it is found in the channels list of your source

+# TODO - move to units in pypi
+channel_lut = {
+ "TEMP": "degC",
+ "HUM": "%rh",
+ "PRESS": "kPa",
+ "PM_(\d|[A,B]_\d)": "ug/m3",
+ "^CO2": "ppm",
+ "^CO": "ppb", # Always start with CO
+ "NOISE_A": "dBA",
+ "NO\Z": "ppb",
+ "NO2": "ppb",
+ "NOX": "ppb",
+ "O3": "ppb",
+ "C6H6": "ppb",
+ "H2S": "ppb",
+ "SO2": "ppb",
+ "CO2": "ppm"
+}
+
+# This table is used to convert units
+# ['from_unit', 'to_unit', 'multiplicative_factor', 'requires_M']
+# - 'from_unit'/'to_unit' = 'multiplicative_factor'
+# - 'requires_M' = whether the conversion requires the pollutant's molecular weight
+# Reverse operations are derived automatically - you don't need to list a pair twice in the opposite direction

+unit_convertion_lut = (
+ ['%rh', '%', 1, False],
+ ['ºC', 'degC', 1, False],
+ ['ppm', 'ppb', 1000, False],
+ ['mg/m3', 'ug/m3', 1000, False],
+ ['mgm3', 'ugm3', 1000, False],
+ ['mg/m3', 'ppm', 24.45, True],
+ ['mgm3', 'ppm', 24.45, True],
+ ['ug/m3', 'ppb', 24.45, True],
+ ['ugm3', 'ppb', 24.45, True],
+ ['mg/m3', 'ppb', 1000*24.45, True],
+ ['mgm3', 'ppb', 1000*24.45, True],
+ ['ug/m3', 'ppm', 1./1000*24.45, True],
+ ['ugm3', 'ppm', 1./1000*24.45, True]
+)

 def get_units_convf(sensor, from_units):
     """
     Returns a factor which will be multiplied to sensor. It accounts for unit
-    convertion based on the desired units in the config._channel_lut for each sensor.
+    conversion based on the desired units in the channel_lut for each sensor.
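As a worked example of the tables above (a sketch only; 24.45 L/mol is the molar volume of an ideal gas at 25 degC and 1 atm):

    # Simple multiplicative row: NO2 reported in ppm, channel_lut target is 'ppb',
    # so ['ppm', 'ppb', 1000, False] applies and no molecular weight is needed:
    factor = get_units_convf('NO2', from_units='ppm')   # expected factor: 1000

    # Row flagged True: converting NO2 from ug/m3 to ppb uses
    # ['ug/m3', 'ppb', 24.45, True] together with M = 46 g/mol, i.e.
    # factor = 24.45 / 46 ~= 0.53, so 100 ug/m3 of NO2 is roughly 53 ppb.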
channel_converted = factor * sensor Parameters ---------- @@ -24,25 +82,25 @@ def get_units_convf(sensor, from_units): rfactor = 1 - for channel in config._channel_lut.keys(): + for channel in channel_lut.keys(): if not (search(channel, sensor)): continue # Molecular weight in case of pollutants - for pollutant in config._molecular_weights.keys(): + for pollutant in molecular_weights.keys(): if search(channel, pollutant): - molecular_weight = config._molecular_weights[pollutant] + molecular_weight = molecular_weights[pollutant] break else: molecular_weight = 1 # Check if channel is in look-up table - if config._channel_lut[channel] != from_units and from_units != "": - logger.info(f"Converting units for {sensor}. From {from_units} to {config._channel_lut[channel]}") - for unit in config._unit_convertion_lut: + if channel_lut[channel] != from_units and from_units != "": + logger.info(f"Converting units for {sensor}. From {from_units} to {channel_lut[channel]}") + for unit in unit_convertion_lut: # Get units - if unit[0] == from_units and unit[1] == config._channel_lut[channel]: + if unit[0] == from_units and unit[1] == channel_lut[channel]: factor = unit[2] requires_conc = unit[3] break - elif unit[1] == from_units and unit[0] == config._channel_lut[channel]: + elif unit[1] == from_units and unit[0] == channel_lut[channel]: factor = 1/unit[2] requires_conc = unit[3] break From 6a3b9d3126963e5204f4b713ece208bda4911fb2 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 30 Apr 2024 10:47:36 +0200 Subject: [PATCH 45/72] Fix in config --- scdata/_config/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scdata/_config/config.py b/scdata/_config/config.py index 54c1a2cd..bd63ca42 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -33,7 +33,7 @@ class Config(object): # - 'script': no plots in jupyter, updates config # - 'jupyterlab': for plots, updates config # - 'chupiflow': no plots in jupyter, does not update config - _framework = 'script' + framework = 'script' if 'IPython' in sys.modules: _ipython_avail = True else: _ipython_avail = False @@ -735,14 +735,14 @@ def load(self): except KeyError: # Ignore unrecognised data in config print ("Unrecognised config item: %s", k) - if self._framework != 'chupiflow': + if self.framework != 'chupiflow': self.save() def save(self): """ Save current config to file. 
""" c = dict() for setting in self: - if not setting.startswith('_') and not callable(self.__getitem__(setting)): + if not setting.startswith('_') and not callable(self.__getitem__(setting)) and setting not in ['blueprints', 'names', 'calibrations']: c[setting] = self[setting] _sccpath = join(self.paths['config'], 'config.yaml') From e82c8c8adaf58f21fc12ed1826878e123a0fd2e3 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 30 Apr 2024 10:53:23 +0200 Subject: [PATCH 46/72] Add CSV handler --- scdata/__init__.py | 3 +- scdata/device/device.py | 20 ++++----- scdata/io/device_file.py | 76 ++++++++++++++++++++++++++--------- scdata/models/__init__.py | 2 +- scdata/models/models.py | 11 +++-- scdata/test/export/to_file.py | 10 +++-- 6 files changed, 81 insertions(+), 41 deletions(-) diff --git a/scdata/__init__.py b/scdata/__init__.py index 81b53c58..a64c53db 100644 --- a/scdata/__init__.py +++ b/scdata/__init__.py @@ -1,5 +1,6 @@ +from ._config import config from .device import Device from .test import Test -from .models import TestOptions, DeviceOptions, APIParams, FileParams +from .models import Source, TestOptions, DeviceOptions, APIParams, FileParams, CSVParams __version__ = '0.9.1' diff --git a/scdata/device/device.py b/scdata/device/device.py index 03bb550d..1fc7b519 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -10,7 +10,7 @@ from scdata.tools.find import find_by_field from scdata._config import config from scdata.io.device_api import * -from scdata.models import Blueprint, Metric, Source, APIParams, FileParams, DeviceOptions, Sensor +from scdata.models import Blueprint, Metric, Source, APIParams, CSVParams, DeviceOptions, Sensor from os.path import join, basename from urllib.parse import urlparse @@ -32,9 +32,9 @@ class Device(BaseModel): blueprint: str = None source: Source = Source() - # Convert this to Options options: DeviceOptions = DeviceOptions() params: object = None + paramsParsed: object = None metrics: List[Metric] = [] meta: dict = dict() hclass: object = None @@ -75,7 +75,6 @@ def model_post_init(self, __context) -> None: # Set handler self.__set_handler__() - # Set blueprint if self.blueprint is not None: if self.blueprint not in config.blueprints: @@ -89,7 +88,7 @@ def model_post_init(self, __context) -> None: else: raise ValueError(f'Specified blueprint url {self.handler.blueprint_url} is not valid') - logger.info(f'Device {self.params.id} initialised') + logger.info(f'Device {self.paramsParsed.id} initialised') def __set_handler__(self): # Add handlers here @@ -112,12 +111,14 @@ def __set_handler__(self): self.hclass = getattr(hmod, self.source.handler) logger.info(f'Setting handler as {self.hclass}') - elif self.source.type == 'file': + self.paramsParsed = TypeAdapter(APIParams).validate_python(self.params) + + elif self.source.type == 'csv': try: module = self.source.module except: # Default to device_file if not specified - module = 'scdata.io.device_file' + module = 'scdata.io.device_file.CSVHandler' logger.warning(f'Module not specified. 
Defaulting to {module}') pass @@ -130,13 +131,14 @@ def __set_handler__(self): self.hclass = getattr(hmod, self.source.handler) logger.info(f'Setting handler as {self.hclass}') + self.paramsParsed = TypeAdapter(CSVParams).validate_python(self.params) elif self.source.type == 'stream': # TODO Add handler here raise NotImplementedError('No handler for stream yet') # TODO - Fix to be able to pass other things that are not IDs if self.hclass is not None: - self.handler = self.hclass(self.params.id) + self.handler = self.hclass(params = self.paramsParsed) else: raise ValueError("Devices need one handler") @@ -146,7 +148,7 @@ def __set_blueprint_attrs__(self, blueprint): if item not in vars(self): raise ValueError(f'Invalid blueprint item {item}') else: - # Small workaround for postponed fields + # Workaround for postponed fields item_type = self.model_fields[item].annotation self.__setattr__(item, TypeAdapter(item_type).validate_python(blueprint[item])) @@ -617,7 +619,7 @@ def export(self, path, forced_overwrite = False, file_format = 'csv'): logger.error('Cannot export null data') return False if file_format == 'csv': - return export_csv_file(path, str(self.params.id), self.data, forced_overwrite = forced_overwrite) + return export_csv_file(path, str(self.paramsParsed.id), self.data, forced_overwrite = forced_overwrite) else: # TODO Make a list of supported formats return NotImplementedError (f'Not supported format. Formats: [csv]') diff --git a/scdata/io/device_file.py b/scdata/io/device_file.py index e08e8020..4678a4e1 100644 --- a/scdata/io/device_file.py +++ b/scdata/io/device_file.py @@ -1,30 +1,66 @@ from os import makedirs, listdir from os.path import exists, join, splitext +import csv + from scdata.tools.custom_logger import logger from scdata.tools.date import localise_date from scdata.tools.cleaning import clean from pandas import read_csv, to_datetime, DataFrame from scdata._config import config -import csv -# from scdata.models import CSVParams, CSVFiles -from pydantic import BaseModel, ConfigDict - - -# class CSVHandler(BaseModel): -# ''' Main implementation of the device class ''' -# model_config = ConfigDict(arbitrary_types_allowed = True) - -# params: CSVParams = CSVParams() -# files: CSVFiles = CSVFiles() -# method: 'sync' - -# # TODO - Fix -# def export(self): -# return True - -# # TODO - Fix -# def get_data(self): -# return True +from scdata.models import Metric + +class CSVHandler: + ''' Main implementation of the CSV data class ''' + + def __init__(self, params): + self.id = params.id + self.params = params + self.method = 'sync' + self.data = DataFrame() + self._metrics: List[Metric] = [] + self.latest_postprocessing = None + if not self.__check__(): + raise FileExistsError(f'File not found: {self.params.path}') + + def __check__(self): + return exists(self.params.path) + + @property + def timezone(self): + return self.params.timezone + + # This returns an empty list to avoid renaming CSVs + @property + def sensors(self): + return [] + + def update_latest_postprocessing(self, date): + + try: + self.latest_postprocessing = date.to_pydatetime() + except: + return False + else: + logger.info(f"Updated latest_postprocessing to: {self.latest_postprocessing}") + return True + + logger.info('Nothing to update') + + return True + + def get_data(self, **kwargs): + self.data = read_csv_file(self.params.path, + timezone= self.timezone, + frequency= kwargs['frequency'], + clean_na= kwargs['clean_na'], + index_name= self.params.index, + skiprows=self.params.header_skip, + 
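            # For context, a hedged usage sketch of the handler above together
            # with the Device class (mirrors the example notebooks later in this
            # series; ids, paths and the 'sc_air' blueprint are taken from there):
            #
            #   import scdata as sc
            #   csv_device = sc.Device(blueprint='sc_air',
            #                          source={'type': 'csv',
            #                                  'handler': 'CSVHandler',
            #                                  'module': 'scdata.io.device_file'},
            #                          params=sc.CSVParams(id=16871,
            #                                              path='example.csv',
            #                                              timezone='Europe/Madrid'))
            #
            #   # Or standalone, assuming read_csv_file keeps the signature shown here:
            #   handler = CSVHandler(sc.CSVParams(id=16871, path='example.csv'))
            #   df = handler.get_data(frequency='1Min', clean_na=None, resample=False)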
sep=self.params.separator, + tzaware=self.params.tzaware, + resample=kwargs['resample'] + ) + + return self.data def export_csv_file(path, file_name, df, forced_overwrite=False): ''' diff --git a/scdata/models/__init__.py b/scdata/models/__init__.py index 8c45e6a7..8c9c09c9 100644 --- a/scdata/models/__init__.py +++ b/scdata/models/__init__.py @@ -1 +1 @@ -from .models import TestOptions, DeviceOptions, Metric, Name, Blueprint, Source, Sensor, FileParams, APIParams \ No newline at end of file +from .models import TestOptions, DeviceOptions, Metric, Name, Blueprint, Source, Sensor, FileParams, APIParams, CSVParams \ No newline at end of file diff --git a/scdata/models/models.py b/scdata/models/models.py index 2f4da6d9..b89250d2 100644 --- a/scdata/models/models.py +++ b/scdata/models/models.py @@ -31,14 +31,15 @@ class APIParams(BaseModel): id: int class FileParams(BaseModel): - id: str # Compatible with API id - header_skip: Optional[List[int]] = [] + id: int # To be compatible with API id + path: str + +class CSVParams(FileParams): + header_skip: Optional[List[int]] = [1,2,3] index: Optional[str] = 'TIME' separator: Optional[str] = ',' tzaware: Optional[bool] = True timezone: Optional[str] = "UTC" - processed: Optional[str] = None - raw: Optional[str] = None class DeviceOptions(BaseModel): clean_na: Optional[bool] = None @@ -50,11 +51,9 @@ class DeviceOptions(BaseModel): class Blueprint(BaseModel): meta: dict = dict() metrics: List[Metric] = [] - source: Source = Source() class Name(BaseModel): id: int name: str description: str unit: str - diff --git a/scdata/test/export/to_file.py b/scdata/test/export/to_file.py index 7fafd458..26cf0c05 100755 --- a/scdata/test/export/to_file.py +++ b/scdata/test/export/to_file.py @@ -27,11 +27,13 @@ def to_csv(self, path = None, forced_overwrite = False): else: epath = path # Export to csv - for device in self.devices.keys(): - export_ok &= self.devices[device].export(epath, forced_overwrite = forced_overwrite) + for device in self.devices: + export_ok &= device.export(epath, forced_overwrite = forced_overwrite) - if export_ok: logger.info(f'Test {self.full_name} exported successfully') - else: logger.error(f'Test {self.full_name} not exported successfully') + if export_ok: + logger.info(f'Test {self.name} exported successfully') + else: + logger.error(f'Error while exporting test: {self.name}') return export_ok From ebbacd3094242f4671d9610c775a880c8d003b36 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 30 Apr 2024 10:54:04 +0200 Subject: [PATCH 47/72] Add options to test --- scdata/test/test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scdata/test/test.py b/scdata/test/test.py index 3e763612..513d4aa6 100644 --- a/scdata/test/test.py +++ b/scdata/test/test.py @@ -65,7 +65,8 @@ def model_post_init(self, __context) -> None: tj = json.load(file) self.devices = TypeAdapter(List[Device]).validate_python(tj['devices']) - self.options = TypeAdapter(Options).validate_python(tj['options']) + self.options = TypeAdapter(TestOptions).validate_python(tj['options']) + print (tj['meta']) self.type = tj['meta']['type'] if self.name != tj['meta']['name']: raise ValueError('Name not matching') @@ -79,6 +80,8 @@ def model_post_init(self, __context) -> None: # self._dispersion_summary = None # self.common_channels = None + logger.info(f'Test {self.name} initialized') + def __str__(self): return self.__full_name__ From 3a6d68ddcf49c924a93149b7611d4992de7b2d38 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 30 Apr 2024 10:54:22 +0200 
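A quick illustration of the reworked models above: `CSVParams` inherits the API-compatible `id` and the `path` from `FileParams`, and adds the CSV-specific fields with defaults. A hedged sketch of what validation yields:

    from scdata.models import CSVParams

    p = CSVParams(id=16871, path='raw/example.csv')
    # Defaults from the model definition above:
    #   p.header_skip -> [1, 2, 3]
    #   p.index       -> 'TIME'
    #   p.separator   -> ','
    #   p.tzaware     -> True
    #   p.timezone    -> 'UTC'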
Subject: [PATCH 48/72] Missing device params --- scdata/device/device.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scdata/device/device.py b/scdata/device/device.py index 1fc7b519..34ce103d 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -181,7 +181,7 @@ def valid_for_processing(self): @property def id(self): - return self.params.id + return self.paramsParsed.id def add_metric(self, metric = dict()): ''' From 8c8d5bacfb00f21d27a22d6bb857218b9271faa9 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 30 Apr 2024 10:54:50 +0200 Subject: [PATCH 49/72] Logger information for device --- scdata/device/device.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scdata/device/device.py b/scdata/device/device.py index 34ce103d..352b4047 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -348,9 +348,10 @@ def __load_wrapup__(self, max_amount, convert_units=True, convert_names=True, ca self.__convert_units__() self.postprocessing_updated = False else: - logger.warning('Empty dataframe in data') + logger.info('Empty dataframe in loaded data. Waiting for cache...') if not cached_data.empty: + logger.info('Cache exists') self.data = self.data.combine_first(cached_data) return not self.data.empty From 0f97c583d015e675fbf35803a87f573b674da860 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Tue, 30 Apr 2024 10:55:09 +0200 Subject: [PATCH 50/72] Avoid paralelizing device loads --- scdata/test/test.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/scdata/test/test.py b/scdata/test/test.py index 513d4aa6..a9f9ce27 100644 --- a/scdata/test/test.py +++ b/scdata/test/test.py @@ -356,18 +356,14 @@ async def load(self): ''' logger.info('Loading test...') - tasks = [] - semaphore = asyncio.Semaphore(config._max_concurrent_requests) - for device in self.devices: # Check for cached data cached_file_path = '' if self.options.cache: tentative_path = join(self.path, 'cached', f'{device.id}.csv') if exists(tentative_path): cached_file_path = tentative_path - # Append task - tasks.append(asyncio.ensure_future(device.load(cache=cached_file_path))) - await asyncio.gather(*tasks) + # Load device (no need to go async, it's fast enough) + await device.load(cache=cached_file_path) logger.info('Test load done') if self.options.cache: self.cache() From 748021fb8796bacb7527da39f5a2e040142d99d3 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:02:43 +0200 Subject: [PATCH 51/72] Update examples --- examples/README.md | 21 +- examples/notebooks/01_getting_started.ipynb | 184 ++--- examples/notebooks/02_access_data.ipynb | 646 ------------------ .../notebooks/03_data_visualisation.ipynb | 87 ++- examples/notebooks/04_processing_data.ipynb | 271 +++++--- .../05_sensor_calibration_workflows.ipynb | 405 ----------- examples/notebooks/06_upload_to_zenodo.ipynb | 137 ---- .../notebooks/08_querying_the_sc_api.ipynb | 59 +- examples/notebooks/09_load_and_post.ipynb | 82 +-- examples/notebooks/10_basic_workflow.ipynb | 397 ----------- .../11_making_html-pdf_reports.ipynb | 208 ------ examples/notebooks/12_geolocated_data.ipynb | 228 ------- .../notebooks/13_pdf_largescale_plots.ipynb | 73 +- .../14-easy-dispersion-analysis.ipynb | 150 ---- examples/notebooks/15-R-basic.ipynb | 22 +- examples/notebooks/16-device_processing.ipynb | 51 +- examples/notebooks/todo/batch_analysis.ipynb | 74 -- .../notebooks/todo/dispersion_analysis.ipynb | 484 ------------- examples/notebooks/todo/geo_data.ipynb | 361 ---------- 
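Regarding patch 50 above: loading now awaits each device sequentially instead of gathering semaphore-limited tasks. A reduced sketch of the two patterns for reference:

    # Before: concurrent loads, bounded by config._max_concurrent_requests
    #   tasks = [asyncio.ensure_future(d.load(cache=path)) for d in self.devices]
    #   await asyncio.gather(*tasks)

    # After: one device at a time ("no need to go async, it's fast enough")
    for device in self.devices:
        cached_file_path = ''    # resolved per device when caching is enabled
        await device.load(cache=cached_file_path)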
examples/notebooks/todo/model_creation.ipynb | 278 --------
 examples/notebooks/todo/stream_sck.ipynb | 286 --------
 .../notebooks/todo/stream_serial_device.ipynb | 238 -------
 .../notebooks/wip-load-process-test.ipynb | 371 ----------
 examples/scripts/change_kit_id.py | 29 -
 examples/scripts/get_device_data.py | 12 +-
 examples/scripts/get_device_macs.py | 18 -
 examples/scripts/process_data.py | 12 -
 27 files changed, 452 insertions(+), 4732 deletions(-)
 delete mode 100644 examples/notebooks/02_access_data.ipynb
 delete mode 100644 examples/notebooks/05_sensor_calibration_workflows.ipynb
 delete mode 100644 examples/notebooks/06_upload_to_zenodo.ipynb
 delete mode 100644 examples/notebooks/10_basic_workflow.ipynb
 delete mode 100644 examples/notebooks/11_making_html-pdf_reports.ipynb
 delete mode 100644 examples/notebooks/12_geolocated_data.ipynb
 delete mode 100644 examples/notebooks/14-easy-dispersion-analysis.ipynb
 delete mode 100644 examples/notebooks/todo/batch_analysis.ipynb
 delete mode 100644 examples/notebooks/todo/dispersion_analysis.ipynb
 delete mode 100755 examples/notebooks/todo/geo_data.ipynb
 delete mode 100644 examples/notebooks/todo/model_creation.ipynb
 delete mode 100755 examples/notebooks/todo/stream_sck.ipynb
 delete mode 100644 examples/notebooks/todo/stream_serial_device.ipynb
 delete mode 100644 examples/notebooks/wip-load-process-test.ipynb
 delete mode 100644 examples/scripts/change_kit_id.py
 delete mode 100644 examples/scripts/get_device_macs.py
 delete mode 100644 examples/scripts/process_data.py

diff --git a/examples/README.md b/examples/README.md
index 698ea4cf..b04d6f11 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,13 +1,12 @@
 # Index of examples
 
-This folder contains examples in either script `.py` or notebook `.ipynb` files regarding different topics.
+This folder contains examples in either script `.py` or notebook `.ipynb` files regarding different topics. The folder is being updated to reflect the latest changes. Examples marked as `(WIP)` are currently not functional (feel free to help or improve via an issue/pull request).
 
-## Scripts +## Script - Get device data: [scripts/get_device_data.py](scripts/get_device_data.py) -- Get devices by location and mac addresses: [scripts/get_device_data.py](scripts/get_device_macs.py) -- Process data based on blueprints: [scripts/process_data.py](scripts/process_data.py) -- Kit ID changes (only with admin priviledges): [scripts/change_kit_id.py](scripts/change_kit_id.py) +- (WIP) Get devices by location and mac addresses: [scripts/get_device_data.py](scripts/get_device_macs.py) +- (WIP) Process data based on blueprints: [scripts/process_data.py](scripts/process_data.py) ## Notebooks @@ -15,15 +14,15 @@ This folder contains examples in either script `.py` or notebook `.ipynb` files - Getting even more data from different sources: [notebooks/02_access_data.ipynb](notebooks/02_access_data.ipynb) - Plotting data: [notebooks/03_data_visualisation.ipynb](notebooks/03_data_visualisation.ipynb) - Processing data: [notebooks/04_processing_data.ipynb](notebooks/04_processing_data.ipynb) -- Sensor calibration: [notebooks/05_sensor_calibration_workflows.ipynb](notebooks/05_sensor_calibration_workflows.ipynb) -- Zenodo uploads: [notebooks/06_upload_to_zenodo.ipynb](notebooks/06_upload_to_zenodo.ipynb) +- (WIP) Sensor calibration: [notebooks/05_sensor_calibration_workflows.ipynb](notebooks/05_sensor_calibration_workflows.ipynb) +- (WIP) Zenodo uploads: [notebooks/06_upload_to_zenodo.ipynb](notebooks/06_upload_to_zenodo.ipynb) - Query the SC API to find devices: [notebooks/08_querying_the_sc_api.ipynb](notebooks/08_querying_the_sc_api.ipynb) - Load and post data (concatenating SD card data too): [notebooks/09_load_and_post.ipynb](notebooks/09_load_and_post.ipynb) -- Basic analysis workflow: [notebooks/10_basic_workflow.ipynb](notebooks/10_basic_workflow.ipynb) -- Making HTML-PDF reports: [notebooks/11_making_html-pdf_reports.ipynb](notebooks/11_making_html-pdf_reports.ipynb) -- Visualizing Geolocated data: [notebooks/12_geolocated_data.ipynb](notebooks/12_geolocated_data.ipynb) +- (WIP) Basic analysis workflow: [notebooks/10_basic_workflow.ipynb](notebooks/10_basic_workflow.ipynb) +- (WIP) Making HTML-PDF reports: [notebooks/11_making_html-pdf_reports.ipynb](notebooks/11_making_html-pdf_reports.ipynb) +- (WIP) Visualizing Geolocated data: [notebooks/12_geolocated_data.ipynb](notebooks/12_geolocated_data.ipynb) - Large format plots: [notebooks/13-pdf-largescale-plots.ipynb](notebooks/13-pdf-largescale-plots.ipynb) -- Easy dispersion analysis: [notebooks/14-easy-dispersion-analysis.ipynb](notebooks/14-easy-dispersion-analysis.ipynb) +- (WIP) Easy dispersion analysis: [notebooks/14-easy-dispersion-analysis.ipynb](notebooks/14-easy-dispersion-analysis.ipynb) - R basic example: [notebooks/15-R-basic-example.ipynb](notebooks/15-R-basic-example.ipynb) - Load, process and post device: [notebooks/16-device_processing.ipynb](notebooks/16-device_processing.ipynb) diff --git a/examples/notebooks/01_getting_started.ipynb b/examples/notebooks/01_getting_started.ipynb index 5e350952..38187a75 100644 --- a/examples/notebooks/01_getting_started.ipynb +++ b/examples/notebooks/01_getting_started.ipynb @@ -49,23 +49,15 @@ "metadata": {}, "outputs": [], "source": [ - "from scdata._config import config\n", - "\n", - "# Output levels:\n", - "# 'QUIET': nothing, \n", - "# 'NORMAL': warn, err\n", - "# 'DEBUG': info, warn, err, success\n", - "config._out_level='DEBUG' \n", - "\n", - "# This defaults to script'. 
If using it in jupyterlab, sets the plot renderers accordingly\n", - "config._framework='jupyterlab'" + "from scdata._config import config # Same as python logging module\n", + "config.log_level='DEBUG'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We can put the data in a test. A test is simply a handy way to group devices and process them" + "We can load data from a `CSV` file (path is relative to the notebook):" ] }, { @@ -74,14 +66,18 @@ "metadata": {}, "outputs": [], "source": [ - "test = Test('EXAMPLE_MINKE_DEMO')" + "csv_device = sc.Device(blueprint='sc_air',\n", + " source={'type':'csv', \n", + " 'handler': 'CSVHandler',\n", + " 'module': 'scdata.io.device_file'},\n", + " params=sc.CSVParams(id=16871, path='../../scdata/tools/interim/example.csv', timezone='Europe/Madrid'))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Before adding devices to the test, we import the Device object" + "Or from the SC API, much simpler:" ] }, { @@ -90,21 +86,15 @@ "metadata": {}, "outputs": [], "source": [ - "from scdata import Device" + "api_device = sc.Device(blueprint='sc_air',\n", + " params=sc.APIParams(id=16784))" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Add as many devices as needed. See understanding blueprints below for more info\n", - "test.add_device(Device(blueprint = 'sc_21_station_module', descriptor = {'source': 'api', \n", - " 'id': '14638'\n", - " }\n", - " )\n", - " )" + "If the device already has a blueprint, it's not necessary to add it here:" ] }, { @@ -113,31 +103,14 @@ "metadata": {}, "outputs": [], "source": [ - "# Add as many devices as needed. See understanding blueprints below for more info\n", - "test.add_device(Device(blueprint = 'sc_21_station_module', descriptor = {'source': 'api', \n", - " 'id': '14627', \n", - " 'min_date': '2021-10-15'\n", - " }\n", - " )\n", - " )" + "api_device_blueprint = sc.Device(params=sc.APIParams(id=15618))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The blueprint urls can be found at `~/.config/scdata/config.yaml` or here:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "config.blueprints" + "We can put the data in a test. A test is simply a handy way to group devices and process them:" ] }, { @@ -146,47 +119,44 @@ "metadata": {}, "outputs": [], "source": [ - "config.blueprints.keys()" + "test = sc.Test(name='EXAMPLE',\n", + " devices=[csv_device, api_device, api_device_blueprint],\n", + " force_recreate=True)\n", + "test.options.cache=True" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This is how you can add a csv device (from sdcard data or other). 
The raw data file is to be put in `~/.cache/scdata/raw`:" + "The blueprint urls can be found at `~/.config/scdata/config.yaml` or here:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ - "test.add_device(Device(blueprint = 'sck_21' , descriptor = {'source': 'csv',\n", - " 'id': 'csv_device',\n", - " 'raw_data_file': 'example.csv',\n", - " 'frequency': '1Min',\n", - " 'timezone': 'Europe/Madrid'\n", - " }\n", - " )\n", - " )" + "config.blueprints" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "This instruction will create the test folder structure, the description and anything needed to keep track of the data:" + "config.blueprints.keys()" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Create the test\n", - "test.create()" + "Load the data" ] }, { @@ -197,8 +167,7 @@ }, "outputs": [], "source": [ - "# Load it\n", - "test.load()" + "await test.load()" ] }, { @@ -208,17 +177,20 @@ "outputs": [], "source": [ "# Check the devices\n", - "test.devices" + "for device in test.devices:\n", + " print (device.id)" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ - "# Check the data\n", - "test.devices['14638'].readings" + "# Check all the info in the device\n", + "test.get_device(16871)" ] }, { @@ -249,39 +221,14 @@ }, "outputs": [], "source": [ - "config.blueprints['sck_21']" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Each of the properties in a device is defined in it's blueprint. For instance, the sensors available. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "config.blueprints['sck_21']['sensors']" + "config.blueprints['sc_air']" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In the case of devices coming from the SC platform, these sensors are defined based on [these definitions](https://api.smartcitizen.me/v0/kits), and are meant for traceability of the data, explaining what each device contains. For other devices, this has to be filled out manually." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The bluepring also contains the metrics (or processed channels) associated with the sensors. These metrics will be calculated once test.process() or device.process() are called" + "Each of the properties in a device is defined in it's blueprint. The bluepring contains the metrics (or processed channels) associated with the sensors. These metrics will be calculated once test.process() or device.process() are called" ] }, { @@ -292,14 +239,14 @@ }, "outputs": [], "source": [ - "config.blueprints['sck_21']['metrics']" + "config.blueprints['sc_air']['metrics']" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, to add your own blueprint, you can add the url directly in the `config.yaml` file or by:" + "Finally, to add your own blueprint, you can add an *url* pointing to a valid json directly in the `config.yaml` file or by:" ] }, { @@ -319,7 +266,7 @@ "## Data structure\n", "Here we show how the data is structured. 
A test contains devices, metadata and models (if created, see sensor_calibration_workflows.ipynb)\n", "\n", - "First the tests:" + "First the devices:" ] }, { @@ -328,7 +275,7 @@ "metadata": {}, "outputs": [], "source": [ - "list(test.devices.keys())" + "list(test.devices)" ] }, { @@ -344,26 +291,7 @@ "metadata": {}, "outputs": [], "source": [ - "test.devices['14638'].readings.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The test description information can be accessed and modified in the `test.descriptor` `dict()`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "test.descriptor" + "test.get_device(16871).data.head(5)" ] }, { @@ -395,41 +323,41 @@ "metadata": {}, "outputs": [], "source": [ - "test.devices['14638'].export(path ='~/Desktop')" + "test.get_device(16871).export(path ='~/Desktop')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Export the data and test descriptor file" + "Export the data:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ - "test.to_html(title='Example');" + "test.to_csv()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Or save the whole test in the default test processed folder:" + "Or save the whole test in the default test processed folder (to-do):" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "test.to_csv()" + "# test.to_html(title='Example');" ] }, { @@ -442,7 +370,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -456,7 +384,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/examples/notebooks/02_access_data.ipynb b/examples/notebooks/02_access_data.ipynb deleted file mode 100644 index d594aec0..00000000 --- a/examples/notebooks/02_access_data.ipynb +++ /dev/null @@ -1,646 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Load data from open data APIs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These examples will show how to get data from different air quality APIs using scdata. Possible sources are:\n", - "\n", - "- Smart Citizen API\n", - "- Nilu\n", - "- MUV2020.eu project\n", - "- Open data portal from Agencia Salut Publica Catalunya" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "heading_collapsed": "true" - }, - "source": [ - "## Smart Citizen API" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The documentation for accessing the Smart Citizen API is available in [developer.smartcitizen.me/](http://developer.smartcitizen.me/).\n", - "\n", - "This example will show the basic interactions with the devices in the SmartCitizen API." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.io.device_api import ScApiDevice" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get the device data based on it's ID" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device = ScApiDevice('10712')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get the device's location" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_lat_long()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_timezone()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get the kit ID (what type of KIT it is - i.e.: SCK 2.0, 2.1, 1.5, Station...)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_kit_ID()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get the date of the last reading" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_last_reading()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get the device data and put it in a pandas dataframe. Options are available regarding rollup, start and end dates and if you want to clean or not NaNs or missing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_sensors()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_data(min_date = '2021-01-01', max_date = None, frequency = '1Min', clean_na = None);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get a glympse of what's in the data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.data.describe()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Get access to the data and start doing things" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.data.head(4)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "heading_collapsed": "true" - }, - "source": [ - "### Filtering devices\n", - "\n", - "We can also retrieve data from SmartCitizen API by filtering extra information such as location, date, tags, etc.\n", - "\n", - "We will be using the following function:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "help(ScApiDevice.get_world_map)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can filter the devices by date, city, area, and tags" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Get devices in Barcelona\n", - "ScApiDevice.get_world_map(city='Barcelona')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Get devices in Barcelona that posted after 2018-12-31\n", - "ScApiDevice.get_world_map(max_date= '2018-12-31', 
city='Barcelona')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Get devices in Barcelona that started posting before 2018-12-31\n", - "ScApiDevice.get_world_map(min_date= '2018-12-31', city='Barcelona')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Get devices in Barcelona that posted during 2019\n", - "ScApiDevice.get_world_map(min_date= '2018-12-31', max_date= '2019-12-31', city='Barcelona')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get devices in Barcelona, online AND outdoor\n", - "ScApiDevice.get_world_map(city='Barcelona', tags=['outdoor', 'online'], tag_method = 'all')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# Get devices in Barcelona, online OR outdoor\n", - "ScApiDevice.get_world_map(city='Barcelona', tags=['outdoor', 'online'], tag_method = 'any')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get devices within a certain area - lat = 41.402706, long = 2.174350, in a radius of 200m\n", - "ScApiDevice.get_world_map(within = (41.402706, 2.174350, 200))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get devices within a certain area - lat = 41.402706, long = 2.174350, in a radius of 200m, that are outdoor and online\n", - "ScApiDevice.get_world_map(within = (41.402706, 2.174350, 200), tags=['outdoor', 'online'], tag_method = 'all')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "heading_collapsed": "true" - }, - "source": [ - "## MUV2020.eu API" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example shows how to access data from the MUV2020, available at https://data.waag.org/api/muv" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.io.read_api import MuvApiDevice\n", - "from scdata._config import config\n", - "\n", - "if not config.is_init: config.get_meta_data()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "muvdev = MuvApiDevice('11707152')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "muvdev.get_device_timezone()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "muvdev.get_device_sensors()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "muvdev.get_device_data()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "heading_collapsed": "true" - }, - "source": [ - "## NILU API" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example shows how to access data from the NILU API, available at https://sensors.nilu.no" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.io.device_api import NiluApiDevice" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device = NiluApiDevice(170)" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_sensors(True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_last_reading(True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "data = device.get_device_data(min_date = '2020-12-01', clean_na = 'drop')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.data.describe()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.data.head(4)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Open data portal\n", - "\n", - "This example shows how to retrieve data from the `analisis.transparenciacatalunya.cat`. The documentation for this api can be found [here](https://dev.socrata.com/foundry/analisi.transparenciacatalunya.cat/uy6k-2s8r)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.io.device_api import DadesObertesApiDevice" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can get data for a device knowing it's id (`codi_eoi`)) or a nearby location. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device = DadesObertesApiDevice(did = 8019043)\n", - "print (device.id)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Alternatively, the device can be downloaded using a (lat, long) location and a radius in meters (the API does not respond with great accuracy, so better to be somehow precise - radius < 2000-3000m):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device = DadesObertesApiDevice(within = (41.385494, 2.154074, 2000))\n", - "print (device.id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_timezone()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_sensors()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.get_device_data(min_date = '2019-10-01', max_date = '2020-04-01', frequency = '1H')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also query a better filtering by using the get_world_map staticmethod" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "help(DadesObertesApiDevice.get_world_map)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Barcelona stations\n", - "barcelona_stations = DadesObertesApiDevice.get_world_map(city='Barcelona')\n", - "print ('Stations in Barcelona')\n", - "print (barcelona_stations)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Barcelona traffic stations\n", - "bt_stations = DadesObertesApiDevice.get_world_map(city='Barcelona')\n", - "print ('Traffic stations in Barcelona')\n", - "print (bt_stations)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - 
"source": [ - "# Get stations in Barcelona of type: 'traffic'\n", - "DadesObertesApiDevice.get_world_map(city='Barcelona', station_type = 'traffic')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get stations in Barcelona of type: 'traffic'\n", - "DadesObertesApiDevice.get_world_map(city='Barcelona', station_type = 'background')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# All urban stations\n", - "urban_stations = DadesObertesApiDevice.get_world_map(area_type='urban')\n", - "print ('Urban Stations')\n", - "print (urban_stations)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "celltoolbar": "Raw Cell Format", - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - }, - "toc": { - "nav_menu": { - "height": "357px", - "width": "307px" - }, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": true, - "toc_position": { - "height": "48px", - "left": "552px", - "top": "705.497px", - "width": "315px" - }, - "toc_section_display": true, - "toc_window_display": true - }, - "toc-autonumbering": false, - "toc-showcode": false, - "toc-showmarkdowntxt": false, - "toc-showtags": false - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/03_data_visualisation.ipynb b/examples/notebooks/03_data_visualisation.ipynb index d020f70e..e6e4b29d 100644 --- a/examples/notebooks/03_data_visualisation.ipynb +++ b/examples/notebooks/03_data_visualisation.ipynb @@ -18,8 +18,25 @@ "from scdata.test import Test\n", "from scdata._config import config\n", "\n", - "config._out_level='DEBUG'\n", - "config.framework='jupyterlab'" + "config.log_level='DEBUG'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load the test from our first example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "test = Test(name='EXAMPLE')" ] }, { @@ -30,9 +47,7 @@ }, "outputs": [], "source": [ - "# Load the test from the other example\n", - "test = Test('EXAMPLE')\n", - "test.load()" + "await test.load()" ] }, { @@ -56,7 +71,7 @@ "outputs": [], "source": [ "traces = {\n", - " \"1\": {\"devices\": \"14638\",\n", + " \"1\": {\"devices\": 16871,\n", " \"channel\": \"NOISE_A\",\n", " \"subplot\": 1},\n", " }\n", @@ -84,7 +99,7 @@ " #\"extras\": ['mean', 'max', 'min'], This is handy when comparing devices,\n", " },\n", " \"2\": {\"devices\": \"all\",\n", - " \"channel\": \"SCD30_CO2\",\n", + " \"channel\": \"HUM\",\n", " \"subplot\": 2},\n", " \"3\": {\"devices\": \"all\",\n", " \"channel\": \"TEMP\",\n", @@ -96,12 +111,12 @@ " \"show\": True,\n", " \"frequency\": '1Min',\n", " \"clean_na\": None,\n", - " \"max_date\": '2021-10-20',\n", - " \"min_date\": '2021-10-15'\n", + " #\"max_date\": '2021-10-20',\n", + " #\"min_date\": '2021-10-15'\n", " }\n", "\n", "formatting = {\"xlabel\": \"Date\",\n", - " \"ylabel\": {1: \"Noise (dBA Scale)\", 2: \"CO2 (ppm)\", 3: \"Temperature (degC)\"},\n", + " 
\"ylabel\": {1: \"Noise (dBA Scale)\", 2: \"Humidity (%rh)\", 3: \"Temperature (degC)\"},\n", " \"yrange\": {1: [0, 90], 2: [400, 1400], 3: [5, 40]},\n", " \"xrange\": {1: ['2021-10-15', '2021-10-20 18:00']},\n", " \"title\": \"Example plot\",\n", @@ -125,15 +140,6 @@ "# figure.savefig('~/Desktop/plot.png', dpi = 300, transparent=False, bbox_inches='tight')" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "figure.savefig('/Users/macoscar/Desktop/plot_2.png', dpi = 300, transparent=False, bbox_inches='tight')" - ] - }, { "cell_type": "markdown", "metadata": { @@ -150,10 +156,13 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ - "\n", "traces = {\n", " \"1\": {\"devices\": \"10751\",\n", " \"channel\": [\"PM_1\", \"PM_25\", \"ADC_48_2\", \"ADC_48_3\"],\n", @@ -190,7 +199,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "jupyter": { + "source_hidden": true + } + }, "outputs": [], "source": [ "traces = {\n", @@ -243,10 +256,10 @@ "outputs": [], "source": [ "traces = {\n", - " \"1\": {\"devices\": '14627',\n", + " \"1\": {\"devices\": 16871,\n", " \"channel\": \"TEMP\",\n", " \"subplot\": 1},\n", - " \"2\": {\"devices\": '14627',\n", + " \"2\": {\"devices\": 16871,\n", " \"channel\": \"NOISE_A\",\n", " \"subplot\": 2},\n", " }\n", @@ -285,9 +298,9 @@ "outputs": [], "source": [ "traces = {\n", - " \"1\": {\"devices\": \"14638\",\n", + " \"1\": {\"devices\": 16871,\n", " \"channel\": \"HUM\"},\n", - " \"2\": {\"devices\": \"14638\",\n", + " \"2\": {\"devices\": 16871,\n", " \"channel\": \"TEMP\"} \n", " }\n", "\n", @@ -321,9 +334,9 @@ "outputs": [], "source": [ "traces = {\n", - " \"1\": {\"devices\": \"14638\",\n", + " \"1\": {\"devices\": 16871,\n", " \"channel\": \"TEMP\"},\n", - " \"2\": {\"devices\": \"14638\",\n", + " \"2\": {\"devices\": 16871,\n", " \"channel\": \"TEMP\"} \n", " }\n", "\n", @@ -351,7 +364,7 @@ "\n", "\n", "figure = test.ts_scatter(traces = traces, options = options, formatting = formatting);\n", - "figure.savefig('/Users/macoscar/Desktop/plot.png', dpi = 300, transparent=False, bbox_inches='tight')" + "#figure.savefig('~/Desktop/plot.png', dpi = 300, transparent=False, bbox_inches='tight')" ] }, { @@ -375,8 +388,8 @@ "outputs": [], "source": [ "traces = {\n", - " \"1\": {\"devices\": \"14627\",\n", - " \"channel\": \"SCD30_CO2\"} \n", + " \"1\": {\"devices\": 16871,\n", + " \"channel\": \"NOISE_A\"} \n", " }\n", "\n", "options = {\n", @@ -385,7 +398,7 @@ " \"clean_na\": None,\n", " }\n", "\n", - "formatting = {\"title\": \"CO2 Heatmap\",\n", + "formatting = {\"title\": \"Temperature Heatmap\",\n", " \"grid\": True,\n", " \"legend\": True,\n", " \"height\": 10,\n", @@ -441,7 +454,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Boxplot" + "## Boxplot (to-fix)" ] }, { @@ -451,7 +464,7 @@ "outputs": [], "source": [ "traces = {\n", - " \"1\": {\"devices\": \"14627\",\n", + " \"1\": {\"devices\": 16871,\n", " \"channel\": \"NOISE_A\"} \n", " }\n", "\n", @@ -478,7 +491,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## More complex plots" + "## More complex plots (to-fix)" ] }, { @@ -546,7 +559,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -560,7 +573,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": 
"ipython3", - "version": "3.9.9" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/examples/notebooks/04_processing_data.ipynb b/examples/notebooks/04_processing_data.ipynb index 2b454f3c..f365849c 100644 --- a/examples/notebooks/04_processing_data.ipynb +++ b/examples/notebooks/04_processing_data.ipynb @@ -23,11 +23,27 @@ "source": [ "from scdata.test import Test\n", "from scdata.device import Device\n", - "from scdata._config import config\n", - "\n", - "config.out_level='DEBUG'\n", - "\n", - "test = Test('EXAMPLE_0.7.0')" + "from scdata._config import config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config._log_level='INFO'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "test = Test(name='EXAMPLE')" ] }, { @@ -38,7 +54,7 @@ }, "outputs": [], "source": [ - "test.load()" + "await test.load()" ] }, { @@ -48,6 +64,16 @@ "## Process basics" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for device in test.devices: \n", + " print (device.id)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -57,7 +83,7 @@ "outputs": [], "source": [ "## The readings for each device are accessible via\n", - "test.devices['14627'].readings" + "test.get_device(16784).data" ] }, { @@ -73,7 +99,7 @@ "metadata": {}, "outputs": [], "source": [ - "df = test.devices['14627'].readings" + "df = test.get_device(16871).data" ] }, { @@ -82,7 +108,7 @@ "metadata": {}, "outputs": [], "source": [ - "df['METRIC'] = 8 * df['ADC_48_1'] + 25 * df['ADC_48_3']" + "df['METRIC'] = 8 * df['TEMP'] + 25 * df['PRESS']" ] }, { @@ -91,7 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "df[['ADC_48_1', 'ADC_48_3', 'METRIC']]" + "df[['TEMP', 'PRESS', 'METRIC']]" ] }, { @@ -109,20 +135,17 @@ }, "outputs": [], "source": [ - "## The sensors for each device are accessible via\n", - "test.devices['14627'].sensors" + "## The metrics for each device are accessible via\n", + "test.get_device(16784).metrics" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ - "## The metrics for each device are accessible via\n", - "test.devices['14627'].metrics" + "help(Test.process)" ] }, { @@ -131,7 +154,7 @@ "metadata": {}, "outputs": [], "source": [ - "help(Test.process)" + "d = test.get_device(16871)" ] }, { @@ -161,7 +184,7 @@ "outputs": [], "source": [ "for device in test.devices:\n", - " print (test.devices[device].readings.columns)" + " print (device.data.columns)" ] }, { @@ -229,13 +252,33 @@ "metadata": {}, "outputs": [], "source": [ - "metric = {f'ADC_POLY': {'process': 'poly_ts',\n", - " 'kwargs': {'channels': ['ADC_48_1', 'ADC_48_3'],\n", - " 'coefficients': [8, 25]}\n", - " }}\n", + "from scdata.models import Metric" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "metric = Metric(name='TP_Poly',\n", + " description='Basic Polynomial calculation',\n", + " function='poly_ts',\n", + " kwargs= {'channels': ['TEMP', 'PRESS'], 'coefficients': [8, 25]}\n", + " )\n", "\n", - "test.devices['14627'].add_metric(metric)\n", - "test.devices['14627'].process(lmetrics=['ADC_POLY'])" + "test.get_device(16871).add_metric(metric)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + 
"test.get_device(16871).process(lmetrics=['TP_Poly'])" ] }, { @@ -247,7 +290,7 @@ }, "outputs": [], "source": [ - "test.devices['14627'].readings" + "test.get_device(16871).data.loc[:,['TEMP', 'PRESS', 'TP_Poly']]" ] }, { @@ -256,14 +299,14 @@ "metadata": {}, "outputs": [], "source": [ - "traces = {1: {'devices': '14627',\n", - " 'channel': 'ADC_POLY',\n", + "traces = {1: {'devices': 16871,\n", + " 'channel': 'TP_Poly',\n", " 'subplot': 2},\n", - " 2: {'devices': '14627',\n", - " 'channel': 'ADC_48_1',\n", + " 2: {'devices': 16871,\n", + " 'channel': 'TEMP',\n", " 'subplot': 1},\n", - " 3: {'devices': '14627',\n", - " 'channel': 'ADC_48_3',\n", + " 3: {'devices': 16871,\n", + " 'channel': 'PRESS',\n", " 'subplot': 1}, \n", " }\n", "\n", @@ -287,13 +330,7 @@ "metadata": {}, "outputs": [], "source": [ - "metric = {f'NOISE_A_SMOOTH': {'process': 'rolling_avg',\n", - " 'kwargs': {'name': ['NOISE_A'],\n", - " 'window_size': 5}\n", - " }}\n", - "\n", - "test.devices['14627'].add_metric(metric)\n", - "test.devices['14627'].process(lmetrics=['NOISE_A_SMOOTH'])" + "help(scdata.device.process.timeseries.rolling_avg)" ] }, { @@ -302,13 +339,23 @@ "metadata": {}, "outputs": [], "source": [ - "metric = {f'NOISE_A_SMOOTH_10': {'process': 'rolling_avg',\n", - " 'kwargs': {'name': ['NOISE_A'],\n", - " 'window_size': 10}\n", - " }}\n", - "\n", - "test.devices['14627'].add_metric(metric)\n", - "test.devices['14627'].process(lmetrics=['NOISE_A_SMOOTH_10'])" + "metric = Metric(name='NOISE_A_SMOOTH',\n", + " description='Basic smoothing calculation',\n", + " function='rolling_avg',\n", + " kwargs= {'name': ['NOISE_A'], 'window_size': 5}\n", + " )\n", + "test.get_device(16871).add_metric(metric)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "test.get_device(16871).process(lmetrics=['NOISE_A_SMOOTH'])" ] }, { @@ -317,13 +364,48 @@ "metadata": {}, "outputs": [], "source": [ - "metric = {f'NOISE_A_SMOOTH_60': {'process': 'rolling_avg',\n", - " 'kwargs': {'name': ['NOISE_A'],\n", - " 'window_size': 60}\n", - " }}\n", - "\n", - "test.devices['14627'].add_metric(metric)\n", - "test.devices['14627'].process(lmetrics=['NOISE_A_SMOOTH_60'])" + "metric = Metric(name='NOISE_A_SMOOTH_10',\n", + " description='Basic smoothing calculation',\n", + " function='rolling_avg',\n", + " kwargs= {'name': ['NOISE_A'], 'window_size': 10}\n", + " )\n", + "test.get_device(16871).add_metric(metric)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "test.get_device(16871).process(lmetrics=['NOISE_A_SMOOTH_10'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "metric = Metric(name='NOISE_A_SMOOTH_60',\n", + " description='Basic smoothing calculation',\n", + " function='rolling_avg',\n", + " kwargs= {'name': ['NOISE_A'], 'window_size': 60}\n", + " )\n", + "test.get_device(16871).add_metric(metric)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "test.get_device(16871).process(lmetrics=['NOISE_A_SMOOTH_60'])" ] }, { @@ -332,7 +414,7 @@ "metadata": {}, "outputs": [], "source": [ - "test.devices['14627'].readings.columns" + "test.get_device(16871).data.columns" ] }, { @@ -341,22 +423,25 @@ "metadata": {}, "outputs": [], "source": [ - "traces = {1: {'devices': '14627',\n", + "traces = {1: {'devices': 16871,\n", " 
'channel': 'NOISE_A',\n", " 'subplot': 1},\n", - " 2: {'devices': '14627',\n", + " 2: {'devices': 16871,\n", " 'channel': 'NOISE_A_SMOOTH',\n", " 'subplot': 1},\n", - " 3: {'devices': '14627',\n", + " 3: {'devices': 16871,\n", " 'channel': 'NOISE_A_SMOOTH_10',\n", " 'subplot': 1},\n", - " 4: {'devices': '14627',\n", + " 4: {'devices': 16871,\n", " 'channel': 'NOISE_A_SMOOTH_60',\n", - " 'subplot': 1} \n", + " 'subplot': 1},\n", + " 5: {'devices': 16871,\n", + " 'channel': 'TEMP',\n", + " 'subplot': 2} \n", " }\n", "\n", "options = {\n", - " 'frequency': '.2H'\n", + " 'frequency': '1Min'\n", "}\n", "formatting = {'width': 800, 'height': 400}\n", "test.ts_uplot(traces = traces, options = options, formatting=formatting)" @@ -365,7 +450,6 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, "tags": [] }, "source": [ @@ -394,38 +478,36 @@ "help(scdata.device.process.timeseries.clean_ts)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will remove values between 35-50dBA and perform a rolling average on the data that is left:" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "metric = {f'PM_1_CLEAN': {'process': 'clean_ts',\n", - " 'kwargs': {'name': 'PM_1', 'limits': [0, 1000], 'window_size': 3}\n", - " }}\n", - "\n", - "test.devices['14602'].add_metric(metric)\n", - "test.process(only_new = True)" + "metric = Metric(name='NOISE_A_CL',\n", + " description='Clean Data calculation',\n", + " function='clean_ts',\n", + " kwargs= {'name': 'NOISE_A', 'limits': [35, 50], 'window_size': 3}\n", + " )\n", + "test.get_device(16871).add_metric(metric)" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ - "traces = {1: {'devices': '14602',\n", - " 'channel': 'PM_1',\n", - " 'subplot': 1},\n", - " 2: {'devices': '14602',\n", - " 'channel': 'PM_1_CLEAN',\n", - " 'subplot': 1}, \n", - " }\n", - "\n", - "options = {\n", - " 'frequency': '1H'\n", - "}\n", - "test.ts_uplot(traces = traces, options = options)" + "test.process(only_new = True)" ] }, { @@ -434,13 +516,7 @@ "metadata": {}, "outputs": [], "source": [ - "metric = {f'SCD30_CO2_SMOOTH': {'process': 'rolling_avg',\n", - " 'kwargs': {'name': ['SCD30_CO2'],\n", - " 'window_size': 10}\n", - " }}\n", - "\n", - "test.devices['14627'].add_metric(metric)\n", - "test.devices['14627'].process(only_new=True)" + "test.get_device(16871).data.loc[:,['NOISE_A', 'NOISE_A_CL']]" ] }, { @@ -449,12 +525,12 @@ "metadata": {}, "outputs": [], "source": [ - "traces = {1: {'devices': '14627',\n", - " 'channel': 'SCD30_CO2',\n", + "traces = {1: {'devices': 16871,\n", + " 'channel': 'NOISE_A',\n", " 'subplot': 1},\n", - " 2: {'devices': '14627',\n", - " 'channel': 'SCD30_CO2_SMOOTH',\n", - " 'subplot': 1}\n", + " 2: {'devices': 16871,\n", + " 'channel': 'NOISE_A_CL',\n", + " 'subplot': 1}, \n", " }\n", "\n", "options = {\n", @@ -462,18 +538,11 @@ "}\n", "test.ts_uplot(traces = traces, options = options)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -487,7 +556,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.8.16" } }, "nbformat": 4, diff --git 
a/examples/notebooks/05_sensor_calibration_workflows.ipynb b/examples/notebooks/05_sensor_calibration_workflows.ipynb deleted file mode 100644 index 7328e1bb..00000000 --- a/examples/notebooks/05_sensor_calibration_workflows.ipynb +++ /dev/null @@ -1,405 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Calibration workflows\n", - "\n", - "This notebook shows how to perform calibration based on sensor test data, export and load various types of models. The main implementation is based on sklearn's models and makes use of the fit/predict/transform convention to generalise the structure applied for sensor's processing. \n", - "\n", - "These flows can later on be implemented to process sensors' data automatically by using blueprints, simply naming the metric to add (see processing_data.ipynb)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.test import Test\n", - "from scdata.device import Device\n", - "from scdata._config import config\n", - "\n", - "config.out_level='DEBUG'\n", - "config.framework='jupyterlab'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load your data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test = Test('PROCESS_EXAMPLE')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Add as many devices as needed. See understanding blueprints below for more info\n", - "test.add_device(Device(blueprint = 'sc_21_station_iscape', descriptor = {'source': 'api', \n", - " 'id': '10751', \n", - " 'min_date': '2020-11-01'\n", - " }\n", - " )\n", - " )\n", - "\n", - "# Add as many devices as needed. See understanding blueprints below for more info\n", - "test.add_device(Device(blueprint = 'sc_21_station_iscape', descriptor = {'source': 'api', \n", - " 'id': '10752', \n", - " 'min_date': '2020-11-01'\n", - " }\n", - " )\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test.create()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "#test.create()\n", - "test.load()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create models\n", - "\n", - "This section will go through creating some models that will aim to make some extra metrics based on linear or not so-linear models. As mentioned above, this is entirely based on sklearn's package, so it will make extensive use of it." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Linear model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# sklearn model tools\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.linear_model import LinearRegression\n", - "from sklearn.ensemble import RandomForestRegressor\n", - "\n", - "# Extra tools\n", - "from scdata.test.utils import normalise_vbls\n", - "from scdata.io import model_export, model_load\n", - "from scdata.utils import get_metrics" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Model inputs. 
\n", - "# Here we will calibrate temperature of one device based on another's temperature\n", - "measurand = {'10751': ['EXT_TEMP']} # Ground truth\n", - "inputs = {'10752': ['TEMP']} # Input\n", - "variables = {\"measurand\": measurand, \"inputs\": inputs}\n", - "\n", - "# Options\n", - "options = config._model_def_opt\n", - "\n", - "# Prepare options\n", - "df, refn = test.prepare(measurand, inputs)\n", - "\n", - "# Do something else with df if necessary\n", - "labels, features = normalise_vbls(df, refn)\n", - "\n", - "# Train test split\n", - "train_X, test_X, train_y, test_y = train_test_split(features, labels, \n", - " test_size = options['test_size'], \n", - " shuffle = options['shuffle'])\n", - "\n", - "# Create model\n", - "model = LinearRegression()\n", - "\n", - "# Fit - predict\n", - "model.fit(train_X, train_y)\n", - "train_yhat = model.predict(train_X)\n", - "test_yhat = model.predict(test_X)\n", - "\n", - "# Diagnose\n", - "metrics = {'train': get_metrics(train_y, train_yhat),\n", - " 'test': get_metrics(test_y, test_yhat)}\n", - "\n", - "# Export\n", - "model_export(name = 'LINEAR_TEMPERATURE', model = model, variables = variables, \n", - " hyperparameters = None, options = options,\n", - " metrics = metrics)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Check the metrics\n", - "print (metrics['train'])\n", - "print (metrics['test'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Now we can add the metrics into the test\n", - "metric = {f'TEMP_CORR': {'process': 'apply_regressor',\n", - " 'kwargs': {'model': model,\n", - " 'variables': variables,\n", - " 'options': options}\n", - " }}\n", - "\n", - "# Add it and process it\n", - "test.devices['10752'].add_metric(metric)\n", - "test.devices['10752'].process()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Plot" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': '10751',\n", - " 'channel': 'EXT_TEMP',\n", - " 'subplot': 1},\n", - " 2: {'devices': '10752',\n", - " 'channel': 'TEMP_CORR',\n", - " 'subplot': 1},\n", - " 3: {'devices': '10752',\n", - " 'channel': 'TEMP',\n", - " 'subplot': 1}, \n", - " }\n", - "\n", - "options = {\n", - " 'frequency': '1H'\n", - "}\n", - "test.ts_uplot(traces = traces, options = options)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': '10751',\n", - " 'channel': 'EXT_TEMP',\n", - " 'subplot': 1},\n", - " 2: {'devices': '10752',\n", - " 'channel': 'TEMP_CORR',\n", - " 'subplot': 1} \n", - " }\n", - "\n", - "options = {\n", - " 'frequency': '1H'\n", - "}\n", - "test.scatter_plot(traces = traces, options = options);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ML model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Model inputs\n", - "measurand = {'10751': ['EXT_TEMP']} # Ground truth\n", - "inputs = {'10752': ['TEMP']} # Input\n", - "variables = {\"measurand\": measurand, \"inputs\": inputs}\n", - "\n", - "# Hyperparameters and options\n", - "hyperparameters = config._model_hyperparameters['rf']\n", - "options = config._model_def_opt\n", - "\n", - "# This averages the common channels into one, if any\n", - "options['common_avg'] = True\n", - 
"\n", - "# Prepare options\n", - "df, refn = test.prepare(measurand, inputs, options)\n", - "\n", - "# Do something else with df if necessary\n", - "labels, features = normalise_vbls(df, refn)\n", - "\n", - "# Train test split\n", - "train_X, test_X, train_y, test_y = train_test_split(features, labels, \n", - " test_size = options['test_size'], \n", - " shuffle = options['shuffle'])\n", - "\n", - "# Create model\n", - "model = RandomForestRegressor(n_estimators = hyperparameters['n_estimators'], \n", - " min_samples_leaf = hyperparameters['min_samples_leaf'], \n", - " oob_score = hyperparameters['oob_score'], \n", - " max_features = hyperparameters['max_features'])\n", - "\n", - "# Fit - predict\n", - "model.fit(train_X, train_y)\n", - "train_yhat = model.predict(train_X)\n", - "test_yhat = model.predict(test_X)\n", - "\n", - "# Diagnose\n", - "metrics = {'train': get_metrics(train_y, train_yhat),\n", - " 'test': get_metrics(test_y, test_yhat)}\n", - "\n", - "# Export\n", - "model_export(name = 'RF_TEMP', model = model, variables = variables, \n", - " hyperparameters = hyperparameters, options = options,\n", - " metrics = metrics)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Now we can add the metrics into the test\n", - "metric = {f'TEMP_CORR_ML': {'process': 'apply_regressor',\n", - " 'kwargs': {'model': model,\n", - " 'variables': variables,\n", - " 'options': options}\n", - " }}\n", - "\n", - "# Add it and process it\n", - "test.devices['10752'].add_metric(metric)\n", - "test.devices['10752'].process(only_new = True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': '10751',\n", - " 'channel': 'EXT_TEMP',\n", - " 'subplot': 1},\n", - " 2: {'devices': '10752',\n", - " 'channel': 'TEMP_CORR',\n", - " 'subplot': 1},\n", - " 3: {'devices': '10752',\n", - " 'channel': 'TEMP_CORR_ML',\n", - " 'subplot': 1}, \n", - " 4: {'devices': '10752',\n", - " 'channel': 'TEMP',\n", - " 'subplot': 1}, \n", - " }\n", - "\n", - "options = {\n", - " 'frequency': '1H'\n", - "}\n", - "test.ts_uplot(traces = traces, options = options)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': ['10751', '10752'],\n", - " 'channels': ['EXT_TEMP', 'TEMP_CORR'],\n", - " 'subplot': 1},\n", - " 2: {'devices': ['10751', '10752'],\n", - " 'channels': ['EXT_TEMP', 'TEMP_CORR_ML'],\n", - " 'subplot': 2} \n", - " }\n", - "\n", - "\n", - "options = {'frequency': '1H'}\n", - "formatting = {'width': 25, 'height': 10, 'ylabel': {1: 'Corrected temperature (degC)'}, \n", - " 'title': 'Alphadelta / Avda Roma - Traffic',\n", - " 'xlabel': {1: 'Ground trugh (degC)'}, \n", - " 'fontsize': 12}\n", - "\n", - "test.scatter_plot(traces = traces, options = options, formatting = formatting);" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - 
"nbformat_minor": 4 -} diff --git a/examples/notebooks/06_upload_to_zenodo.ipynb b/examples/notebooks/06_upload_to_zenodo.ipynb deleted file mode 100644 index c69c7a1a..00000000 --- a/examples/notebooks/06_upload_to_zenodo.ipynb +++ /dev/null @@ -1,137 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Upload to Zenodo" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is an example to upload datasets or publications to zenodo.org.\n", - "\n", - "You have to fill up the yaml file in the data/uploads folder and put your reports or pdfs there. The data that comes from the tests are not needed in that folder.\n", - "\n", - "You will also need to install `pdfrw` and `reportlab` as separate packages." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import scdata as sc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.utils.zenodo import zenodo_upload\n", - "from scdata._config import config\n", - "\n", - "config._out_level = 'DEBUG'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Remember to have the `ZENODO_TOKEN` environment variable set. If you don't have one, visit the [docs](https://docs.smartcitizen.me/Guides/data/Upload%20data%20to%20zenodo/).\n", - "You can load an `.env` file using the util below:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.utils.meta import load_env\n", - "\n", - "envfile = '/path/to/.env'\n", - "\n", - "load_env(envfile)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Alternatively**, you can add it like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from os import environ" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "environ['ZENODO_TOKEN'] = 'yourtokenhere'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The cell below will use the example_zenodo_upload.yaml file that will find in the `~/.cache/scdata/uploads` folder. This submission is not valid (will return a `Validation error`), and needs to be modified to get a valid HTTP response." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# You can use the sandbox.zenodo.org for tests, as well as a dry_run. 
When you are happy with your upload, set these variables to False\n",
-    "# Then go to uploads in the zenodo section and publish whenever you are ready\n",
-    "zenodo_upload('upload_20220530', sandbox = False, dry_run = True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.9"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/examples/notebooks/08_querying_the_sc_api.ipynb b/examples/notebooks/08_querying_the_sc_api.ipynb
index 0560a3a2..775edf15 100644
--- a/examples/notebooks/08_querying_the_sc_api.ipynb
+++ b/examples/notebooks/08_querying_the_sc_api.ipynb
@@ -1,15 +1,10 @@
 {
  "cells": [
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "import scdata as sc\n",
-    "from scdata._config import config\n",
-    "\n",
-    "config._out_level = 'DEBUG'"
+    "# Querying and searching in the SC API"
   ]
  },
  {
@@ -18,41 +13,56 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from scdata.io.device_api import ScApiDevice"
+    "import smartcitizen_connector"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "There are three ways of querying the API:\n",
+    "There are many ways of querying the API:\n",
     "\n",
-    "- getting all devices (with filtering possibilities) using `get_world_map`\n",
-    "- by global_search - `ScApiDevice.global_search`: Gets devices from Smart Citizen API based on basic search query values, searching both Users and Devices at the same time.\n",
-    "- by ransack parameters - `ScApiDevice.search_by_query`: Similar to the pagination, you can filter and sort most responses that return more than one result. This is done with the Ransack library."
+    "- getting one device\n",
+    "- global search (with filtering possibilities) using `global_search`\n",
+    "- by query parameters - `search_by_query`: similar to pagination, you can filter and sort most responses that return more than one result. This is done with the Ransack library."
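Pulling the changed cells together, the three query flows reduce to the sketch below. It assumes the `smartcitizen_connector` calls exactly as they appear in this diff (`SCDevice`, `global_search`, `search_by_query`); signatures may drift in later releases, and the top-level `await` assumes a notebook cell:

```python
# Sketch of the three query flows from the updated notebook; all calls are
# taken from the changed cells in this diff and may differ in later releases.
import smartcitizen_connector

# 1. One device by ID (17177 is the example ID used in the notebook)
device = smartcitizen_connector.SCDevice(17177)
print(vars(device.json))   # device metadata as returned by the API
await device.get_data()    # fetch the timeseries (top-level await: notebook only)

# 2. Free-text search across users and devices
df_air = smartcitizen_connector.global_search(value='AIR')

# 3. Ransack-style filtered query: devices with a postprocessing_id set
df_pp = smartcitizen_connector.search_by_query(
    endpoint='devices',
    search_items=[{'key': 'postprocessing_id',
                   'value': 'not_null',
                   'full': True}])
```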
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## World Map" + "## Get one device" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "All basic information about devices" + "d = smartcitizen_connector.SCDevice(17177)" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "vars(d.json)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ - "ScApiDevice.get_world_map(full = True)" + "await d.get_data()" ] }, { @@ -68,7 +78,7 @@ "metadata": {}, "outputs": [], "source": [ - "df = ScApiDevice.global_search(value = 'AIR', full = True)" + "df = smartcitizen_connector.global_search(value = 'AIR')" ] }, { @@ -93,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "help(ScApiDevice.search_by_query)" + "help(smartcitizen_connector.search_by_query)" ] }, { @@ -102,7 +112,12 @@ "metadata": {}, "outputs": [], "source": [ - "df = ScApiDevice.search_by_query(key = 'postprocessing_id', value = 'not_null', full = True)" + "df = smartcitizen_connector.search_by_query(endpoint='devices',\n", + " search_items=[{\n", + " 'key': 'postprocessing_id', \n", + " 'value': 'not_null',\n", + " 'full': True\n", + " }])" ] }, { @@ -145,7 +160,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -159,7 +174,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/examples/notebooks/09_load_and_post.ipynb b/examples/notebooks/09_load_and_post.ipynb index e33fbae5..aa864d0a 100644 --- a/examples/notebooks/09_load_and_post.ipynb +++ b/examples/notebooks/09_load_and_post.ipynb @@ -8,44 +8,10 @@ "source": [ "import scdata as sc\n", "from scdata._config import config\n", - "from scdata.utils.meta import load_env\n", "from scdata.io import sdcard_concat\n", "from os.path import join\n", "\n", - "config._out_level = 'DEBUG'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Load the oAuth API Key into your environment. 
You can use either format from below" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "'''\n", - "Envile path with the following format\n", - "---\n", - "SC_BEARER=...\n", - "[...]\n", - "---\n", - "Where SC_BEARER can be obtained from https://smartcitizen.me/profile/users - oAuth API Key\n", - "'''\n", - "\n", - "envfile = '/PATH/TO/ENVFILE/.env'\n", - "load_env(envfile)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Alternatively** you can load the oAuth API Key as below" + "config._log_level = 'DEBUG'" ] }, { @@ -63,7 +29,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Location" + "## Copy the `sd-card` files" ] }, { @@ -130,7 +96,8 @@ "outputs": [], "source": [ "folder = join(config.paths['raw'], \"12552\")\n", - "device = sc.Device(descriptor = {'source': 'api', 'id': \"12552\"})" + "device = sc.Device(blueprint = 'sc_air', \n", + " params=sc.APIParams(id=12552))" ] }, { @@ -140,7 +107,8 @@ "outputs": [], "source": [ "# If output = '', it only returns a DataFrame\n", - "device.readings = sdcard_concat(folder, output = '')" + "device.data = sdcard_concat(folder, output = '')\n", + "device.handler.data = device.data" ] }, { @@ -149,7 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "device.readings" + "device.data" ] }, { @@ -159,9 +127,9 @@ "outputs": [], "source": [ "# If output is a *.csv or *.CSV file, saves also a CSV in the same folder. Default is CONCAT.CSV\n", - "device.readings = sdcard_concat(folder, output = 'CONCAT.CSV')\n", + "device.data = sdcard_concat(folder, output = 'CONCAT.CSV')\n", "# Is the same as\n", - "# device.readings = sdcard_concat(folder)" + "# device.data = sdcard_concat(folder)" ] }, { @@ -180,7 +148,7 @@ }, "outputs": [], "source": [ - "device.post_sensors(dry_run = True) # If dry_run = True, prints the json payload of the first chunk only, otherwise, it posts all the data" + "await device.post(dry_run=True, columns='sensors') # If dry_run = True, prints the json payload of the first chunk only, otherwise, it posts all the data" ] }, { @@ -213,15 +181,14 @@ " folder = join(config.paths['raw'], device)\n", " \n", " # If you have a normal sck_21...\n", - " dev = sc.Device(descriptor = {'source': 'api', 'id': device})\n", + " dev = sc.Device(blueprint = 'sc_air', \n", + " params=sc.APIParams(id=device))\n", " \n", - " # If you have something else... modify the blueprint\n", - " # dev = sc.Device(blueprint = 'sck_21_co2', descriptor = {'source': 'api', 'id': device})\n", - "\n", " print (f'Processing device {device}')\n", " print (f'Loading files from {folder}')\n", "\n", - " dev.readings = sdcard_concat(folder)\n", + " dev.data = sdcard_concat(folder)\n", + " dev.handler.data = dev.data\n", " \n", " devd[device] = dev" ] @@ -230,7 +197,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Have a look at things before posting" + "Have a look at things before posting..." 
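Condensing the updated cells above, the single-device flow looks like the sketch below; every name (`sc.Device`, `sc.APIParams`, `sdcard_concat`, `Device.post`) comes from this diff, and the top-level `await` again assumes a notebook cell:

```python
# Sketch of the sd-card concat-and-post flow from the updated notebook;
# names and signatures are taken from the changed cells, as an illustration.
import scdata as sc
from scdata.io import sdcard_concat
from scdata._config import config
from os.path import join

folder = join(config.paths['raw'], '12552')
device = sc.Device(blueprint='sc_air', params=sc.APIParams(id=12552))

# output='' returns a DataFrame only; a *.csv/*.CSV name also writes a file
device.data = sdcard_concat(folder, output='')
device.handler.data = device.data

# dry_run=True prints the payload of the first chunk instead of posting
await device.post(dry_run=True, columns='sensors')
```

The diff sets `device.handler.data` alongside `device.data`, presumably because the handler is what performs the actual post.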
] }, { @@ -248,16 +215,7 @@ "metadata": {}, "outputs": [], "source": [ - "devd[devices[0]].readings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "devd[devices[0]].sensors" + "devd[devices[0]].data" ] }, { @@ -273,7 +231,7 @@ "metadata": {}, "outputs": [], "source": [ - "devd[devices[0]].post_sensors(dry_run = True) # If dry_run = True, prints the json payload of the first chunk only" + "await devd[devices[0]].post(dry_run = True, columns='sensors') # If dry_run = True, prints the json payload of the first chunk only" ] }, { @@ -290,13 +248,13 @@ "outputs": [], "source": [ "for device in devices:\n", - " devd[device].post_sensors(dry_run = True)" + " await devd[device].post(dry_run = True, columns='sensors')" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -310,7 +268,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/examples/notebooks/10_basic_workflow.ipynb b/examples/notebooks/10_basic_workflow.ipynb deleted file mode 100644 index af631c3a..00000000 --- a/examples/notebooks/10_basic_workflow.ipynb +++ /dev/null @@ -1,397 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Workflow example\n", - "\n", - "This notebook demonstrates a basic workflow for loading data, making timeseries plots and saving it to csv files, with two different ways: \n", - "\n", - "- Individual `device`\n", - "- `Test` containing various devices\n", - "\n", - "This is an example of the metadata stored in a `test`, alongside a collection of devices with different options: \n", - "\n", - "- author\n", - "- project\n", - "- notes\n", - "- dates\n", - "- report\n", - "- ..." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Device example\n", - "\n", - "Devices can be loaded from various sources:\n", - "- local csv files\n", - "- the Smart Citizen API\n", - "- the MUV api\n", - "- open data APIs such as the Barcelona City council.\n", - "- NILU iflink API (Norwegian Institute for Air Research)\n", - "\n", - "This notebook will showcase the SmartCitizen API one. Visit [this notebook](./02_access_the_power_of_data.ipynb) to get more info on how to acces other sources." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata._config import config\n", - "\n", - "config._out_level = 'DEBUG'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.device import Device\n", - "\n", - "# Below, the device ID is the number after kits/ in the kit URL, for instance:\n", - "# or this kit: http://smartcitizen.me/kits/13625, the device would be 13625\n", - "device = Device(blueprint = 'sck_21', descriptor = {'id': '13625', \n", - " # The source is always api when it comes from any API, \n", - " # in this case as it's an sck_21, we'll use the SmartCitizen one\n", - " 'source': 'api', \n", - " # The frequency at which we want to load the data. 
By default, we don't clean NaNs\n", - " 'frequency': '1Min'})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get the device information\n", - "print ('---SENSORS---')\n", - "print (device.sensors)\n", - "# The device contains another sub-device from the API in question that shows other methods\n", - "print ('\\n---ADDED AT---')\n", - "print (device.api_device.get_device_added_at())\n", - "print ('\\n---LAST READING---')\n", - "print (device.api_device.get_device_last_reading())\n", - "print ('\\n---TIMEZONE---')\n", - "print (device.api_device.get_device_timezone())\n", - "print ('\\n---API SENSORS---')\n", - "print (device.api_device.get_device_sensors())\n", - "print ('\\n---API KIT ID---')\n", - "print (device.api_device.get_kit_ID())\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this case, we assumed the device was a SCK 2.1 blueprint, but in fact the platform returns a kit_id 33 [see https://api.smartcitizen.me/v0/kits?per_page=200](https://api.smartcitizen.me/v0/kits?per_page=200). The sensors there will be used." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "# Not get the device data\n", - "device.load();" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Take a look at the first rows\n", - "device.readings.head(4)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# The readings object is a pandas.DataFrame() object, with the same properties to plot, filter, get data, etc\n", - "# More information on the pandas.DataFrame() object here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n", - "device.readings[['TEMP']].plot(figsize = (15,10), \n", - " grid = True, \n", - " ylim=(15,20))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get some basic metrics\n", - "print (device.readings[['TEMP']].mean())\n", - "print (device.readings[['TEMP']].max())\n", - "print (device.readings[['TEMP']].min())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Test example\n", - "\n", - "Tests are \"more complex\" structures, that allow having plenty of devices in the same abstract representation. It allows for traceability of different deployments with metadata stored alongside with it." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata import Test\n", - "# The second time you load it, you don't need to input the whole name, just some words. 
Then, in the input box, put the number for the test\n", - "test = Test('MINKE_WORFKLOW')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "### WARNING: Run this cell only the first time, when you create the test\n", - "\n", - "# Add the devices you want to it\n", - "devices = ['13625', '13604', '13605']\n", - "\n", - "for device in devices:\n", - " # Tests can have devices from many sources, and they can be compared in a common framework (from csv data, API(s), etc.)\n", - " test.add_device(Device(blueprint = 'sck_21', descriptor = {'source': 'api',\n", - " 'id': device,\n", - " 'frequency': '1Min',\n", - " 'timezone': 'Europe/Madrid'}))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "### WARNING: Run this cell only the first time, when you create the test\n", - "\n", - "# Create it\n", - "test.create()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This creates the necessary folder structure and data in the following path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test.path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "# Finally, load it\n", - "test.load()\n", - "\n", - "# Alternatively, you can load from different dates - if you have cached the files, you might need to delete them first\n", - "# Options for min_date, max_date, frequency, or what to do with the NaNs\n", - "\n", - "# options = {'min_date': '2021-01-20'}\n", - "# test.load(options = options)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "All csv data is directly stored in the folder above, but in the `cached` subfolder. Next time, the load process from the API will account for what is already in that folder and won't load the same data again. 
The margin to reload data can be adjusted in the `cached_data_margin` parameter in the `config.yaml` file (in hours)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Explore a bit\n", - "test.devices" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# The same applies for the devices data inside (a pandas.DataFrame)\n", - "test.devices['13625'].readings.head(4)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Make a plot (basic one)\n", - "traces = {1: {'devices': 'all', 'channel': 'TEMP', 'subplot': 1}}\n", - "\n", - "test.ts_plot(traces = traces);" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Make some adjustments\n", - "traces = {1: {'devices': 'all', 'channel': 'TEMP', 'subplot': 1}}\n", - "\n", - "formatting = {'width': 12, 'height': 8, 'ylabel': {1: 'TEMP'}, 'title': 'Temperature comparison'}\n", - "\n", - "# Options for min_date, max_date, frequency, or what to do with the NaNs\n", - "options = {'min_date': '2021-01-19 12:00:00', 'max_date': '2021-01-22', 'frequency': '10Min', 'clean_na': None}\n", - "\n", - "test.ts_plot(traces = traces, options = options, formatting = formatting);" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Make some adjustments and some subplots\n", - "# If you put 'all' in the devices for the traces, it will plot all of them\n", - "# Otherwise, you can just put a list of the devices you want\n", - "traces = {1: {'devices': 'all', 'channel': 'TEMP', 'subplot': 1},\n", - " 2: {'devices': ['13625', '13604'], 'channel': 'HUM', 'subplot': 2}}\n", - "\n", - "formatting = {'width': 12, \n", - " 'height': 10, \n", - " 'ylabel': {1: 'TEMP (degC)', 2: 'HUM (%rh)'}, \n", - " 'title': 'Temperature and humidity comparison'}\n", - "\n", - "options = {'min_date': '2021-01-19 12:00:00','max_date': '2021-01-22', 'frequency': '10Min', 'clean_na': None}\n", - "fig = test.ts_plot(traces = traces, options = options, formatting = formatting);\n", - "\n", - "# Uncomment below to save the figure somewhere\n", - "# fig.savefig('~/Desktop/plot.png', dpi = 300, transparent=False, bbox_inches='tight')\n", - "\n", - "# Visit the 03_plotting_in_no_time example to explore more options regarding plots" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Make some interactive plots (if you have plotly installed)\n", - "# If you put 'all' in the devices for the traces, it will plot all of them\n", - "# Otherwise, you can just put a list of the devices you want\n", - "traces = {1: {'devices': 'all', 'channel': 'TEMP', 'subplot': 1},\n", - " 2: {'devices': ['13625', '13604'], 'channel': 'HUM', 'subplot': 2}}\n", - "\n", - "formatting = {'width': 800, \n", - " 'height': 600, \n", - " 'ylabel': {1: 'TEMP (degC)', 2: 'HUM (%rh)'}, \n", - " 'title': 'Temperature and humidity comparison'}\n", - "\n", - "options = {'min_date': '2021-01-19 12:00:00', 'max_date': '2021-01-22', 'frequency': '10Min', 'clean_na': None}\n", - "test.ts_uplot(traces = traces, options = options, formatting = formatting)\n", - "\n", - "# Uncomment below to save the figure somewhere\n", - "# fig.savefig('~/Desktop/plot.png', dpi = 300, transparent=False, bbox_inches='tight')" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Export data to the desktop in csv\n", - "test.devices['13625'].export(path ='~/Desktop')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Or the whole thing\n", - "test.to_csv()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# You can also make a descriptor front page in HTML\n", - "test.to_html();" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/11_making_html-pdf_reports.ipynb b/examples/notebooks/11_making_html-pdf_reports.ipynb deleted file mode 100644 index 4a9e2dd6..00000000 --- a/examples/notebooks/11_making_html-pdf_reports.ipynb +++ /dev/null @@ -1,208 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Making an html-pdf report\n", - "\n", - "This example shows how to make an html report based on the analysis carried out in the text." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from scdata.test import Test\n", - "from scdata._config import config\n", - "\n", - "config.out_level='DEBUG'\n", - "config.framework='jupyterlab'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "# Load the test from the other example\n", - "test = Test('EXAMPLE')\n", - "test.load()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Make a plot and show it" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': '14602', 'channel': 'NOISE_A', 'subplot': 1}\n", - " }\n", - "\n", - "options = {'min_date': '2021-10-15 12:00:00', 'frequency': '5Min', 'clean_na': None}\n", - "formatting = {'width': 14, 'ylabel': {1: 'Noise (dBA)'}}\n", - "fig = test.ts_plot(traces = traces, options = options, formatting = formatting)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Add the figure and some text on the report" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "text = '''\n", - "Eius nesciunt quae perspiciatis sequi. Aliquid exercitationem aliquam similique neque quod velit. Saepe voluptas earum similique facilis. Consectetur inventore magnam cupiditate id excepturi ea. Rerum odit a maxime rem eum cumque quos. Ut exercitationem exercitationem officia.\n", - "Ipsa laborum aspernatur in. Est itaque doloribus est unde adipisci ut. Eaque sint voluptatem quo ab.\n", - "Cumque ipsa est quas ut qui aliquam. Non labore aspernatur atque. Repellat quo ut quos autem quam. Ut quis ex voluptatem at adipisci. Officiis sint dicta vero soluta numquam. Natus repudiandae et magnam.\n", - "Deserunt aspernatur ullam quasi voluptas. Quam pariatur ut incidunt. Totam labore itaque eaque est illo est. 
Dolorem dicta voluptatibus quis qui consectetur rem.\n", - "Dolores dolores fuga reprehenderit ea ut vel. Eaque quia debitis voluptatum corporis dicta delectus. Consequatur voluptas aspernatur et inventore. Aut eaque nihil totam dolore.\n", - "Voluptas error quia magnam aliquid aliquam nesciunt consequatur. Provident saepe quia consequatur blanditiis inventore. Totam ex ut laudantium cum quae non magni. Vel quaerat cumque nulla omnis.\n", - "Perferendis qui non omnis perspiciatis aliquam ut. Nihil voluptates fugiat repellendus veniam saepe sed dolorem consequatur. Itaque nulla dolor id perferendis earum vitae.\n", - "Autem sit quia quia nihil et reprehenderit ipsa exercitationem. Mollitia perferendis voluptas odio consequuntur voluptatem natus dolorem ipsa. Labore harum quas voluptatem consequatur blanditiis quidem praesentium iste.\n", - "Nobis recusandae nihil quas in rem nihil. Ea vel veniam quam. Beatae consequatur architecto sit cumque sit ut nihil dolore. Qui et quos eos temporibus. Consequatur iure velit voluptatem velit.\n", - "Aut minus nobis repellendus quis. Ducimus esse animi quod ab est fuga perferendis. Doloribus voluptatum illum et. Ut fuga animi a architecto maxime quia neque sit.\n", - "'''\n", - "\n", - "test.add_content(title = 'Noise analysis for in 14602', figure = fig, text = text)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And maybe another one" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': 'csv_device', 'channel': 'NOISE_A', 'subplot': 1}\n", - " }\n", - "\n", - "options = {'frequency': '1Min', 'clean_na': None}\n", - "formatting = {'width': 14, 'ylabel': {1: 'Noise (dBA)'}}\n", - "fig = test.ts_plot(traces = traces, options = options, formatting = formatting)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "text = '''\n", - "Eius nesciunt quae perspiciatis sequi. Aliquid exercitationem aliquam similique neque quod velit. Saepe voluptas earum similique facilis. Consectetur inventore magnam cupiditate id excepturi ea. Rerum odit a maxime rem eum cumque quos. Ut exercitationem exercitationem officia.\n", - "Ipsa laborum aspernatur in. Est itaque doloribus est unde adipisci ut. Eaque sint voluptatem quo ab.\n", - "Cumque ipsa est quas ut qui aliquam. Non labore aspernatur atque. Repellat quo ut quos autem quam. Ut quis ex voluptatem at adipisci. Officiis sint dicta vero soluta numquam. Natus repudiandae et magnam.\n", - "Deserunt aspernatur ullam quasi voluptas. Quam pariatur ut incidunt. Totam labore itaque eaque est illo est. Dolorem dicta voluptatibus quis qui consectetur rem.\n", - "Dolores dolores fuga reprehenderit ea ut vel. Eaque quia debitis voluptatum corporis dicta delectus. Consequatur voluptas aspernatur et inventore. Aut eaque nihil totam dolore.\n", - "Voluptas error quia magnam aliquid aliquam nesciunt consequatur. Provident saepe quia consequatur blanditiis inventore. Totam ex ut laudantium cum quae non magni. Vel quaerat cumque nulla omnis.\n", - "Perferendis qui non omnis perspiciatis aliquam ut. Nihil voluptates fugiat repellendus veniam saepe sed dolorem consequatur. Itaque nulla dolor id perferendis earum vitae.\n", - "Autem sit quia quia nihil et reprehenderit ipsa exercitationem. Mollitia perferendis voluptas odio consequuntur voluptatem natus dolorem ipsa. 
Labore harum quas voluptatem consequatur blanditiis quidem praesentium iste.\n", - "Nobis recusandae nihil quas in rem nihil. Ea vel veniam quam. Beatae consequatur architecto sit cumque sit ut nihil dolore. Qui et quos eos temporibus. Consequatur iure velit voluptatem velit.\n", - "Aut minus nobis repellendus quis. Ducimus esse animi quod ab est fuga perferendis. Doloribus voluptatum illum et. Ut fuga animi a architecto maxime quia neque sit.\n", - "'''\n", - "test.add_content(title = 'Noise analysis', figure = fig, text = text)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "traces = {1: {'devices': 'csv_device', 'channel': 'NOISE_A', 'subplot': 1}\n", - " }\n", - "\n", - "options = {'frequency': '1Min'}\n", - "formatting = {'width': 14, 'ylabel': {1: 'Noise (dBA)'}, 'title': 'Noise'}\n", - "\n", - "iframe = test.ts_uplot(traces = traces, options = options, formatting = formatting)\n", - "iframe" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "text = '''\n", - "Eius nesciunt quae perspiciatis sequi. Aliquid exercitationem aliquam similique neque quod velit. Saepe voluptas earum similique facilis. Consectetur inventore magnam cupiditate id excepturi ea. Rerum odit a maxime rem eum cumque quos. Ut exercitationem exercitationem officia.\n", - "Ipsa laborum aspernatur in. Est itaque doloribus est unde adipisci ut. Eaque sint voluptatem quo ab.\n", - "Cumque ipsa est quas ut qui aliquam. Non labore aspernatur atque. Repellat quo ut quos autem quam. Ut quis ex voluptatem at adipisci. Officiis sint dicta vero soluta numquam. Natus repudiandae et magnam.\n", - "Deserunt aspernatur ullam quasi voluptas. Quam pariatur ut incidunt. Totam labore itaque eaque est illo est. Dolorem dicta voluptatibus quis qui consectetur rem.\n", - "Dolores dolores fuga reprehenderit ea ut vel. Eaque quia debitis voluptatum corporis dicta delectus. Consequatur voluptas aspernatur et inventore. Aut eaque nihil totam dolore.\n", - "Voluptas error quia magnam aliquid aliquam nesciunt consequatur. Provident saepe quia consequatur blanditiis inventore. Totam ex ut laudantium cum quae non magni. Vel quaerat cumque nulla omnis.\n", - "Perferendis qui non omnis perspiciatis aliquam ut. Nihil voluptates fugiat repellendus veniam saepe sed dolorem consequatur. Itaque nulla dolor id perferendis earum vitae.\n", - "Autem sit quia quia nihil et reprehenderit ipsa exercitationem. Mollitia perferendis voluptas odio consequuntur voluptatem natus dolorem ipsa. Labore harum quas voluptatem consequatur blanditiis quidem praesentium iste.\n", - "Nobis recusandae nihil quas in rem nihil. Ea vel veniam quam. Beatae consequatur architecto sit cumque sit ut nihil dolore. Qui et quos eos temporibus. Consequatur iure velit voluptatem velit.\n", - "Aut minus nobis repellendus quis. Ducimus esse animi quod ab est fuga perferendis. Doloribus voluptatum illum et. 
Ut fuga animi a architecto maxime quia neque sit.\n", - "'''\n", - "\n", - "test.add_content(title = 'Interactive Noise',iframe = iframe, text = text, show_title = True, force = True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "report_path, _ = test.to_html(title='Noise analysis', devices_summary=True);\n", - "report_path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/12_geolocated_data.ipynb b/examples/notebooks/12_geolocated_data.ipynb deleted file mode 100644 index fa36409d..00000000 --- a/examples/notebooks/12_geolocated_data.ipynb +++ /dev/null @@ -1,228 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# GeoJSON visualisation\n", - "\n", - "This example walks through the process of plotting geolocated data in a leaflet interactive map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import scdata as sc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test = sc.Test('GEOJSON_EXAMPLE')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test.add_device(sc.Device(blueprint = 'sck_21_gps' , \n", - " descriptor = {'source': 'csv',\n", - " 'id': 'BIKE',\n", - " 'raw_data_file': 'geodata.csv',\n", - " 'timezone': 'Europe/Madrid',\n", - " 'frequency': '5S'\n", - " }\n", - " )\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test.create(force=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "test.load()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Static Map\n", - "\n", - "Static maps can be rendered by passing the `map_type = 'static'` argument. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Color mapping to channel\n", - "\n", - "A variable can be mapped into the colors of each reading." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "options = {\n", - " 'period': '20S', # Resampling period (change to 5S for larger resolution)\n", - " 'zoom': '14', # Zoom position on map\n", - " 'markers': True, # Show markers on plot \n", - " 'radius': 4, # Marker radius \n", - " 'stroke-width': 2, # Line-width\n", - " 'tiles': 'cartodbpositron', # Map tiles\n", - " 'minmax': True, # Map channel (if passed) to min max values or config._channel_bins\n", - " 'location': 'average' # Either list with [LAT, LONG] or 'average' for the points\n", - "}\n", - "ms = test.path_plot(devices=['BIKE'], channel = 'PM_25', map_type = 'static', options = options);\n", - "ms" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### GPX viewer\n", - "\n", - "If no channel is provided, a GPX viewer is returned" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "options = {\n", - " 'period': '5S', # Resampling period\n", - " 'zoom': '14', # Zoom position on map\n", - " 'markers': True, # Show markers on plot\n", - " 'stroke-width': 2, # Line-width\n", - " 'tiles': 'cartodbpositron', # Map tiles\n", - " 'location': 'average' # Either list with [LAT, LONG] or 'average' for the points\n", - "}\n", - "gv = test.path_plot(devices=['BIKE'], map_type = 'static', options = options);\n", - "gv" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Dynamic map\n", - "\n", - "Small animations can be done with `map_style='dynamic'`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Color mapping to channel\n", - "\n", - "As above, if a variable is passed, a color mapping will be done in the traces. 
If the channel is not in `config._channel_bins`, the min and max limits will be used" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "options = {\n", - " 'period': '5S', # Resampling period\n", - " 'zoom': '14', # Zoom position on map\n", - " 'markers': True, # Show markers on plot \n", - " 'stroke-width': 2, # Line-width\n", - " 'radius': 4,\n", - " 'tiles': 'cartodbpositron', # Map tiles\n", - " 'location': 'average' # Either list with [LAT, LONG] or 'average' for the points\n", - "}\n", - "gv = test.path_plot(devices=['BIKE'], channel = 'GPS_SPEED', start_date = '2020-10-01 17:50:00', \n", - " map_type = 'dynamic', options = options);\n", - "gv" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Animated GPX viewer\n", - "\n", - "If no channel is passed, an animated single color GPX viewer is generated" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "options = {\n", - " 'period': '5S', # Resampling period\n", - " 'zoom': '14', # Zoom position on map\n", - " 'markers': True, # Show markers on plot \n", - " 'stroke-width': 2, # Line-width\n", - " 'radius': 4,\n", - " 'tiles': 'cartodbpositron', # Map tiles\n", - " 'location': 'average' # Either list with [LAT, LONG] or 'average' for the points\n", - "}\n", - "gv = test.path_plot(devices=['BIKE'], start_date = '2020-10-01 17:50:00', end_date = '2020-10-01 18:02:00',\n", - " map_type = 'dynamic', options = options);\n", - "gv" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/13_pdf_largescale_plots.ipynb b/examples/notebooks/13_pdf_largescale_plots.ipynb index 7fc10d3a..2a6851b2 100644 --- a/examples/notebooks/13_pdf_largescale_plots.ipynb +++ b/examples/notebooks/13_pdf_largescale_plots.ipynb @@ -9,7 +9,7 @@ "\n", "This example makes plots like the ones seen in this image: \n", "\n", - "![](https://live.staticflickr.com/4483/38165401276_ef6eacca0c_h.jpg)" + "![Data analysis with pen and paper tools in Plaça del Sol - Barcelona](https://live.staticflickr.com/4490/24368448418_d602723a10_h.jpg)" ] }, { @@ -30,7 +30,7 @@ "import scdata as sc\n", "from scdata._config import config\n", "\n", - "config._out_level='DEBUG'\n", + "config.log_level='DEBUG'\n", "config.framework='jupyterlab'" ] }, @@ -44,8 +44,8 @@ }, "outputs": [], "source": [ - "test = sc.Test('TEST')\n", - "test.load()" + "test = sc.Test(name='EXAMPLE')\n", + "await test.load()" ] }, { @@ -56,6 +56,22 @@ "## Make plots" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "68ef6085-4f81-455f-9f25-428e5233b851", + "metadata": {}, + "outputs": [], + "source": [ + "traces = {\n", + " \"1\": {\"devices\": \"all\",\n", + " \"channel\": [\"NOISE_A\"],\n", + " \"subplot\": 1}\n", + " }\n", + "\n", + "test.ts_uplot(traces=traces)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -67,7 +83,7 @@ "from os.path import exists\n", "from os import mkdir\n", "\n", - "savedir = '/path/to/folder'\n", + 
"savedir = '~/Desktop/pdf-scdata'\n", "if not exists(savedir): mkdir(savedir)" ] }, @@ -93,10 +109,10 @@ "\n", "metric = \"NOISE_A\"\n", "ylabel_i = \"Noise (dBA)\"\n", - "yrange_i = [25, 50]\n", + "yrange_i = [20, 60]\n", "paginate_every = 5\n", - "min_date = '2021-06-12'\n", - "max_date = '2021-06-14'\n", + "# min_date = '2021-06-12'\n", + "# max_date = '2021-06-14'\n", "sandbox = False\n", "title = \"VenicAir\"\n", "paper = \"A2\"\n", @@ -130,19 +146,22 @@ " ylabel = {}\n", " \n", " for device in test.devices:\n", - " if device in pages: continue\n", - " pages.append(device)\n", - " \n", - " d = test.devices[device].api_device.devicejson\n", - " \n", - "\n", - " traces [str(imgs)] = {\"devices\": device,\n", + " if device.id in pages: continue\n", + " pages.append(device.id) \n", + " print (device.id)\n", + " print (metric)\n", + " traces [str(imgs)] = {\"devices\": device.id,\n", " \"channel\": metric,\n", " \"subplot\": imgs\n", " }\n", "\n", " yrange [imgs] = yrange_i\n", - " ylabel [imgs] = setBold(ylabel_i) + '\\n\\n' + d['name'] + '\\n' + setItalic(f\"user: {d['owner']['username']}\")\n", + " if device.source.type == 'csv': continue\n", + " if 'json' in vars(device.handler):\n", + " owner = device.handler.json.owner.username\n", + " else:\n", + " owner = ''\n", + " ylabel [imgs] = setBold(ylabel_i) + '\\n\\n' + str(device.id) + '\\n' + setItalic(f\"user: {owner}\")\n", "\n", " imgs += 1\n", " if imgs == paginate_every + 1: \n", @@ -152,15 +171,15 @@ " options = {\n", " \"show\": True,\n", " \"frequency\": '5Min',\n", - " \"clean_na\": None,\n", - " \"max_date\": max_date,\n", - " \"min_date\": min_date\n", + " \"clean_na\": False,\n", + " # \"max_date\": max_date,\n", + " # \"min_date\": min_date\n", " }\n", "\n", " formatting = {\"xlabel\": \"Date\",\n", " \"ylabel\": ylabel,\n", " \"yrange\": yrange,\n", - " \"xrange\": {1: [min_date, max_date]},\n", + " # \"xrange\": {1: [min_date, max_date]},\n", " \"hspace\": 0.25, \n", " \"title\": None,\n", " \"sharex\":True,\n", @@ -197,6 +216,14 @@ "### User total\n" ] }, + { + "cell_type": "markdown", + "id": "fa248370-25e4-48d3-b561-243f58347598", + "metadata": {}, + "source": [ + "Make sure that the settings (y-ranges, dates, are applicable)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -308,7 +335,7 @@ "source": [ "## Make PDFs\n", "\n", - "From now on is all svg. Needs inkscape installed to do this automatically" + "From now on is all svg. Needs inkscape installed to do this automatically:" ] }, { @@ -343,7 +370,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -357,7 +384,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/examples/notebooks/14-easy-dispersion-analysis.ipynb b/examples/notebooks/14-easy-dispersion-analysis.ipynb deleted file mode 100644 index 4a92888b..00000000 --- a/examples/notebooks/14-easy-dispersion-analysis.ipynb +++ /dev/null @@ -1,150 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "466db05a-92bf-4046-b3c6-5edd3727902f", - "metadata": {}, - "source": [ - "# Dispersion analysis\n", - "\n", - "This example shows how to perform an easy dispersion analysis of some devices, plotting data with confidence intervals." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2676927f-93a5-48ce-8ab5-e27e49bf68d5", - "metadata": {}, - "outputs": [], - "source": [ - "import scdata as sc\n", - "import pprint\n", - "from IPython.display import display, HTML\n", - "\n", - "pp = pprint.PrettyPrinter(indent=4)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a27d9a9f-c718-4ed1-a86d-e5cca69640b6", - "metadata": {}, - "outputs": [], - "source": [ - "t = sc.Test('DELIVERIES_122022')\n", - "t.add_devices_list(devices_list=list(range(15876,15884,1)), blueprint='sck_21')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee8743d1-1546-4f09-97ef-070d48704c0e", - "metadata": {}, - "outputs": [], - "source": [ - "t.create()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aadd58fc-985a-4f10-a2b8-1fdfc7b3bfb9", - "metadata": {}, - "outputs": [], - "source": [ - "t.add_devices_list(devices_list=[15835, 15837, 15838, 15839], blueprint='sck_21')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ba1a894d-0523-4101-a640-4a219d0bb269", - "metadata": {}, - "outputs": [], - "source": [ - "t.create(force=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ef8dad5f-97d0-4be5-b3a5-ba7e2fc6c0a5", - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "t.load()" - ] - }, - { - "cell_type": "markdown", - "id": "32b01e6c-06ce-484a-975a-8530abf1f2f8", - "metadata": {}, - "source": [ - "Get the channels that all devices have (will show warnings if one device is not measuring a particular channel, or if it has less data points)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ce848f2e-e2f0-425e-9b8e-2fd8caf5be54", - "metadata": {}, - "outputs": [], - "source": [ - "t.get_common_channels()\n", - "print('\\nDispersion Analysis Summary:\\n')\n", - "pp.pprint(t.dispersion_analysis())" - ] - }, - { - "cell_type": "markdown", - "id": "493ed26b-bbd7-4a22-9cd6-cc4ccb9b7671", - "metadata": {}, - "source": [ - "Plot each metric, grouped and with confidence intervals" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7bda5a23-4c48-446a-a5a6-e71174e1310a", - "metadata": {}, - "outputs": [], - "source": [ - "for ch in t.common_channels:\n", - " display(t.ts_dispersion_uplot(channel = ch, formatting = {'width': 1000, 'height': 300, 'padding-bottom': 600, 'join_sbplot':True}))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41e3e7d3-37d3-4604-87d9-de73a15c6c78", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/notebooks/15-R-basic.ipynb b/examples/notebooks/15-R-basic.ipynb index 6683140b..ae13aa85 100644 --- a/examples/notebooks/15-R-basic.ipynb +++ b/examples/notebooks/15-R-basic.ipynb @@ -158,17 +158,21 @@ ], "metadata": { "kernelspec": { - "display_name": "R", - "language": "R", - "name": "ir" + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, "language_info": { - "codemirror_mode": "r", - "file_extension": ".r", - "mimetype": 
"text/x-r-source", - "name": "R", - "pygments_lexer": "r", - "version": "4.2.1" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" } }, "nbformat": 4, diff --git a/examples/notebooks/16-device_processing.ipynb b/examples/notebooks/16-device_processing.ipynb index 485b28b6..2f8882fc 100644 --- a/examples/notebooks/16-device_processing.ipynb +++ b/examples/notebooks/16-device_processing.ipynb @@ -4,9 +4,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Process Device\n", + "# Process Device with API data\n", "\n", - "This notebook guides through the very simple process of postprocessing data from a device and posting it to the platform" + "This notebook guides through the very simple process of postprocessing data from a device and posting it to the platform.\n", + "\n", + "**Note**: change the dry_run for actually posting data" ] }, { @@ -29,7 +31,7 @@ }, "outputs": [], "source": [ - "config._out_level = 'DEBUG'" + "config.log_level = 'DEBUG'" ] }, { @@ -41,7 +43,7 @@ "outputs": [], "source": [ "# Get device\n", - "device = sc.Device(descriptor = {'source': 'api', 'id': '15705'})" + "d = sc.Device(params=sc.APIParams(id=15618))" ] }, { @@ -53,8 +55,18 @@ "outputs": [], "source": [ "# Check blueprint and hardware information\n", - "print (device.blueprint)\n", - "print (device.api_device.postprocessing)" + "print (d.blueprint)\n", + "print (d.handler.hardware_postprocessing)\n", + "print (d.handler.postprocessing)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d.options" ] }, { @@ -65,21 +77,37 @@ "tags": [] }, "outputs": [], + "source": [ + "# Download data from the latest_postprocessing\n", + "d.options.min_date = d.handler.postprocessing['latest_postprocessing']\n", + "await d.load()\n", + "# You should get nothing here..." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "# Download all the data\n", - "device.load(only_unprocessed = False)" + "d.options.min_date = None\n", + "await d.load()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { + "scrolled": true, "tags": [] }, "outputs": [], "source": [ "# Process it according to the blueprint\n", - "device.process()" + "d.process()" ] }, { @@ -91,19 +119,20 @@ "outputs": [], "source": [ "# Quick check on the data\n", - "device.readings['NO2'].plot()" + "d.data['NO2'].plot()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { + "scrolled": true, "tags": [] }, "outputs": [], "source": [ "# Post metrics (set dry_run to true just to check everything goes well, but do not actually post it)\n", - "device.post_metrics(dry_run=False)" + "await d.post(columns='metrics', dry_run=True)" ] }, { @@ -115,7 +144,7 @@ "outputs": [], "source": [ "# Update the postprocessing information\n", - "device.update_postprocessing()" + "d.update_postprocessing(dry_run=True)" ] }, { diff --git a/examples/notebooks/todo/batch_analysis.ipynb b/examples/notebooks/todo/batch_analysis.ipynb deleted file mode 100644 index f0cf775e..00000000 --- a/examples/notebooks/todo/batch_analysis.ipynb +++ /dev/null @@ -1,74 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "# Batch processing\n", - "\n", - "This example shows how to process data in batches, avoiding to prepare repeating tasks. Comment/uncomment the file below the descriptor file below to see the examples" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.models.batch import batch_analysis\n", - "\n", - "# Example for multiple plots\n", - "# descriptor_file = '../tasks/batchplot.json'\n", - "# Example for multiple models\n", - "descriptor_file = '../tasks/batchmodel.json'\n", - "\n", - "batch_session = batch_analysis(descriptor_file, verbose = True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "batch_session.run()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/todo/dispersion_analysis.ipynb b/examples/notebooks/todo/dispersion_analysis.ipynb deleted file mode 100644 index fd6ecbda..00000000 --- a/examples/notebooks/todo/dispersion_analysis.ipynb +++ /dev/null @@ -1,484 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "tags": [ - "show_only_output" - ] - }, - "source": [ - "# Batch dispersion analysis" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "## Initialise session and load data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "%load_ext 
autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "import warnings \n", - "warnings.filterwarnings('ignore')\n", - "from IPython.display import display, HTML\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.colors\n", - "from os.path import join\n", - "\n", - "from src.data.data import *\n", - "data = data_wrapper()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "# INPUT DATA\n", - "# Name of test to be analysed\n", - "dispersion_test = '2020-01_INT_ILLA_DE_AROUSA'\n", - "type_file = None\n", - "# Percentage of points to be considered NG sensor\n", - "limit_errors = 3\n", - "# Multiplier for std_dev (sigma) - Normal distribution (99.73%)\n", - "limit_confidence_sigma = 3\n", - "# t-student confidence level (%)\n", - "t_confidence_level = 99\n", - "# Use average dispersion or instantaneous\n", - "use_instantatenous_dispersion = False\n", - "# Min/max date for the analysis\n", - "# min_date = '2019-12-10 18:00:00'\n", - "min_date = None\n", - "max_date = None\n", - "# In case there is a device with lower amount of channels, ignore the missing channels and keep going\n", - "ignore_missing_channels = True\n", - "# Smooth channels\n", - "smooth_channels = True\n", - "smooth_number = 5" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "data.load_test(dispersion_test, options = {'clean_na': True, 'clean_na_method': 'drop'})\n", - "# Fancy way to open the test description\n", - "import subprocess\n", - "subprocess.call(['open', join(data.available_tests()[dispersion_test],'test_description.yaml')])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "\n", - "## Get list of common channels\n", - "Displays a warning in case there is a device that has fewer channels than the rest. You can choose whether or not to ignore it or update the list of common channels" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "# Get list of common channels\n", - "save_path = join(data.dataDirectory, 'export/figs', dispersion_test)\n", - "# Test Path\n", - "if not exists(save_path):\n", - " print ('Creating export directory:\\n{}'.format(save_path))\n", - " mkdir(save_path)\n", - "\n", - "list_channels = list()\n", - "# Get list of devices\n", - "list_devices = list(data.tests[dispersion_test].devices.keys())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "# Init list of common channels. 
Get the one that has the most\n", - "list_channels = data.tests[dispersion_test].devices[list_devices[0]].readings.columns\n", - "# Extract list of common channels\n", - "len_channels = len(list_channels)\n", - "\n", - "for device in list_devices:\n", - " \n", - " if ignore_missing_channels: \n", - " # We don't reduce the list in case the new list is smaller\n", - " list_channels = list(set(list_channels) | set(data.tests[dispersion_test].devices[device].readings.columns))\n", - " else:\n", - " # We reduce it\n", - " list_channels = list(set(list_channels) & set(data.tests[dispersion_test].devices[device].readings.columns))\n", - "\n", - " print ('Device {}'.format(device))\n", - " print ('\\tMin reading at {}'.format(data.tests[dispersion_test].devices[device].readings.index[0]))\n", - " #min_date_records = min(min_date_records, records.readings[dispersion_test]['devices'][device]['data'].index[0])\n", - " print ('\\tMax reading at {}'.format(data.tests[dispersion_test].devices[device].readings.index[-1]))\n", - " #max_date_records = min(max_date_records, records.readings[dispersion_test]['devices'][device]['data'].index[-1])\n", - " print ('\\tNumber of points {}'.format(len(data.tests[dispersion_test].devices[device].readings.index)))\n", - " ## Eliminate devices with no points\n", - " if (len(data.tests[dispersion_test].devices[device].readings.index) == 0):\n", - " print ('Droping device {} for insufficient data points'.format(device))\n", - " data.tests[dispersion_test].devices.pop(device)\n", - " # Check the number of channels \n", - " elif len_channels != len(data.tests[dispersion_test].devices[device].readings.columns): \n", - " print(\"\\tWARNING: Device {} has {}. Current common list length is {}\".format(device, len(data.tests[dispersion_test].devices[device].readings.columns), len_channels))\n", - " len_channels = len(list_channels)\n", - " if ignore_missing_channels:\n", - " print (\"\\tIgnoring missing channels\")\n", - "\n", - "print('Final list of channels:\\n', list_channels)\n", - "\n", - "if min_date is not None: min_date = pd.to_datetime(min_date).tz_localize('UTC').tz_convert('Europe/Madrid')\n", - "if max_date is not None: max_date = pd.to_datetime(max_date).tz_localize('UTC').tz_convert('Europe/Madrid')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "## Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "tags": [ - "show_only_output" - ] - }, - "outputs": [], - "source": [ - "from scipy import stats\n", - "from scipy.stats import t\n", - "import numpy as np\n", - "import traceback\n", - "\n", - "# Calculate the dispersion for the sensors present in the dataset\n", - "dispersion_df = pd.DataFrame()\n", - "dispersion_history = list()\n", - "display(HTML('
Warnings
'))\n", - "warning_displayed = False\n", - "location = None\n", - "\n", - "for device in list_devices:\n", - " location_test = data.tests[dispersion_test].devices[device].location\n", - " if location_test is None: data.std_out (f'Device {device} has no location')\n", - " else:\n", - " if location is None: location = location_test\n", - " elif location_test != location: data.std_out (f'Device {device} has different location!')\n", - " \n", - "for channel in list_channels:\n", - " list_columns = list()\n", - " if channel != 'BATT':\n", - " for device in list_devices:\n", - " if channel in data.tests[dispersion_test].devices[device].readings.columns and len(data.tests[dispersion_test].devices[device].readings.loc[:,channel]) >0 :\n", - " # Important to resample and bfill for unmatching measures\n", - " if smooth_channels:\n", - " channel_new = data.tests[dispersion_test].devices[device].readings[channel].resample('1Min').bfill().rolling(window=smooth_number).mean()\n", - " dispersion_df[channel + '-' + device] = channel_new[channel_new > 0]\n", - " else:\n", - " dispersion_df[channel + '-' + device] = data.tests[dispersion_test].devices[device].readings[channel].resample('1Min').bfill()\n", - " \n", - " list_columns.append(channel + '-' + device)\n", - " else:\n", - " display(HTML('
WARNING: Device {} does not contain {}
'.format(device, channel)))\n", - " warning_displayed = True\n", - " try:\n", - " if dispersion_df.index.tzinfo is None: dispersion_df.index = dispersion_df.index.tz_localize('UTC').tz_convert(location)\n", - " except:\n", - " traceback.print_exc()\n", - " pass\n", - " \n", - " # Trim dataset to min and max dates (normally these tests are carried out with _minutes_ of differences)\n", - " if min_date is not None: dispersion_df = dispersion_df[dispersion_df.index > min_date]\n", - " if max_date is not None: dispersion_df = dispersion_df[dispersion_df.index < max_date]\n", - "\n", - " # Calculate Metrics\n", - " dispersion_df[channel + '_AVG'] = dispersion_df.loc[:,list_columns].mean(skipna=True, axis = 1)\n", - " dispersion_df[channel + '_STD'] = dispersion_df.loc[:,list_columns].std(skipna=True, axis = 1)\n", - " \n", - " # Calculate Metrics\n", - " dispersion_global = dispersion_df[channel + '_STD'].mean()\n", - " # print (dispersion_df.index[0], dispersion_df.index[-1], channel, dispersion_global)\n", - " dispersion_history.append([channel, dispersion_global])\n", - "if not warning_displayed:\n", - " display(HTML('
All devices show similar amounts of data. No data loss concern
'))\n", - " \n", - "# display(HTML('
Sensor dispersion
'))\n", - "# display(HTML('
Below, the sensor dispersion for each channel is listed (units of each sensor)
'))\n", - "# dispersion_history = tuple(dispersion_history)\n", - "# display(HTML('
    '))\n", - "# for item in dispersion_history:\n", - "# display(HTML('
{}: {}'.format(item[0], item[1])))\n",
    - "# display(HTML('
'))\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "tags": [ - "show_only_output" - ] - }, - "source": [ - "## Time Series Plot (Full Period)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "list_channels_kit = ['PRESS', 'CCS811_ECO2', 'EXT_PM_10', 'NOISE_A', 'TEMP', 'CCS811_VOCS', 'HUM', 'EXT_PM_1', 'LIGHT', 'EXT_PM_25']\n", - "test_for_kit = False\n", - "list_channels_plots = list_channels_kit if test_for_kit else list_channels" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "tags": [ - "show_only_output" - ] - }, - "outputs": [], - "source": [ - "if min_date is None: min_date_test = dispersion_df.index[0] \n", - "else: min_date_test = min_date\n", - "if max_date is None: max_date_test = dispersion_df.index[-1] \n", - "else: max_date_test = max_date\n", - "display(HTML('Min Date available: {}'.format(min_date_test)))\n", - "display(HTML('Max Date available: {}'.format(max_date_test)))\n", - "\n", - "dispersion_df_trim = dispersion_df.copy()\n", - "dispersion_df_trim = dispersion_df_trim[dispersion_df_trim.index > min_date_test]\n", - "dispersion_df_trim = dispersion_df_trim[dispersion_df_trim.index < max_date_test]\n", - "\n", - "# Ignore battery\n", - "if 'BATT' in list_channels_plots: list_channels_plots.remove('BATT')\n", - "dict_devices_tbr = dict()\n", - "for item in list_channels_plots: dict_devices_tbr[item] = list()\n", - "\n", - "for channel in list_channels_plots:\n", - " if channel not in list_channels_plots and test_for_kit: continue\n", - " # Make subplot\n", - " list_columns = list()\n", - " fig, (ax1, ax2) = plot.subplots(nrows = 2, figsize= (15,10))\n", - " cmap = plot.cm.Reds\n", - " norm = matplotlib.colors.Normalize(vmin=0, vmax=limit_errors/2)\n", - " index = list_channels_plots.index(channel)+1\n", - " total_number = len(list_channels_plots)\n", - " display(HTML('
({}/{}) - {}
'.format(index, total_number, channel)))\n", - " \n", - " dispersion_avg = 0\n", - " limit_confidence_sigma = 0\n", - " for item in dispersion_history:\n", - " if channel == item[0]:\n", - " dispersion_avg = item[1]\n", - " \n", - " if len(list_devices)>30:\n", - " display(HTML('
Using Normal Distribution. Using limit for sigma confidence: {}'.format(limit_confidence_sigma)))\n", - " limit_confidence = limit_confidence_sigma\n", - " # Calculate upper and lower bounds\n", - " if (use_instantatenous_dispersion):\n", - " # For sensors with high variability in the measurements, it's better to use this (i.e. alphasense)\n", - " upper_bound = dispersion_df_trim[channel + '_AVG'] + limit_confidence * dispersion_df_trim[channel + '_STD']\n", - " lower_bound = dispersion_df_trim[channel + '_AVG'] - abs(limit_confidence * dispersion_df_trim[channel + '_STD'])\n", - " else:\n", - " upper_bound = dispersion_df_trim[channel + '_AVG'] + limit_confidence * dispersion_avg\n", - " lower_bound = dispersion_df_trim[channel + '_AVG'] - abs(limit_confidence * dispersion_avg)\n", - " else:\n", - " limit_confidence = t.interval(t_confidence_level/100.0, len(list_devices), loc=dispersion_df_trim[channel + '_AVG'], scale=dispersion_avg)\n", - " display(HTML('
Using t-student Distribution
'))\n", - " upper_bound = limit_confidence[1]\n", - " lower_bound = limit_confidence[0]\n", - "\n", - " dispersion_df_trim[channel + '_MAX'] = dispersion_df_trim.loc[:,list_columns].max(skipna=True, axis = 1)\n", - " dispersion_df_trim[channel + '_MIN'] = dispersion_df_trim.loc[:,list_columns].min(skipna=True, axis = 1)\n", - " \n", - " # print ('Plotting devices')\n", - " for device in list_devices:\n", - " name_column = channel + '-' + device \n", - " if name_column in dispersion_df_trim.columns:\n", - " # Count how many times we go above the upper bound or below the lower one\n", - " count_problems_up = dispersion_df_trim[name_column] > upper_bound\n", - " count_problems_down = dispersion_df_trim[name_column] < lower_bound\n", - " \n", - " # Count them\n", - " count_problems = [1 if (count_problems_up[i] or count_problems_down[i]) else 0 for i in range(len(count_problems_up))]\n", - " # print (channel, device, np.sum(count_problems), len(count_problems))\n", - " \n", - " # Add the trace in either\n", - " number_errors = np.sum(count_problems)\n", - " max_number_errors = len(count_problems)\n", - " \n", - " if number_errors/max_number_errors > limit_errors/100:\n", - " print (f'WARNING: Device {device} out of {limit_errors}% limit - {np.round(number_errors/max_number_errors*100, 1)}% out')\n", - " if device not in dict_devices_tbr[channel]: dict_devices_tbr[channel].append(device)\n", - " alpha = 1\n", - " ax1.plot(dispersion_df_trim.index, \n", - " dispersion_df_trim[name_column], \n", - " color = 'r',\n", - " label = device, alpha = alpha)\n", - " else:\n", - " alpha = 1\n", - " color = 'g'\n", - " ax2.plot(dispersion_df_trim.index, \n", - " dispersion_df_trim[name_column], \n", - " color = color, \n", - " label = device, alpha = alpha)\n", - " \n", - " # Add upper and low bound bound to subplot 1\n", - " ax1.plot(dispersion_df_trim.index, dispersion_df_trim[channel + '_AVG'],'b', label = 'Average', alpha = 0.6)\n", - " ax1.plot(dispersion_df_trim.index, upper_bound, 'k', label = 'Upper-Bound', alpha = 0.6)\n", - " ax1.plot(dispersion_df_trim.index, lower_bound, 'k',label = 'Lower-Bound', alpha = 0.6)\n", - " \n", - " # Format the legend\n", - " lgd1 = ax1.legend(bbox_to_anchor=(1.1, 0.5), fancybox=True, loc='center left', ncol = 5)\n", - " ax1.grid(True)\n", - " ax1.set_ylabel(channel + ' TBR')\n", - " ax1.set_xlabel('Time')\n", - " \n", - " # Add upper and low bound bound to subplot 2\n", - " ax2.plot(dispersion_df_trim.index, dispersion_df_trim[channel + '_AVG'],'b', label = 'Average', alpha = 0.6)\n", - " ax2.plot(dispersion_df_trim.index, upper_bound, 'k', label = 'Upper-Bound', alpha = 0.6)\n", - " ax2.plot(dispersion_df_trim.index, lower_bound, 'k',label = 'Lower-Bound', alpha = 0.6)\n", - " \n", - " # Format the legend\n", - " ax2.legend(bbox_to_anchor=(1.1, 0.5), fancybox=True, loc='center left', ncol = 5)\n", - " lgd2 = ax2.legend(bbox_to_anchor=(1.1, 0.5), fancybox=True, loc='center left', ncol = 5)\n", - " ax2.grid(True)\n", - " ax2.set_ylabel(channel + ' OK')\n", - " ax2.set_xlabel('Time')\n", - " \n", - " # Check file type to make the export\n", - " if type_file is not None: print ('Saving figure')\n", - " if type_file == 'fig':\n", - " pickle.dump(fig, open(save_path + '/' + dispersion_test + '_' + channel + '.fig.pickle', 'wb'))\n", - " elif type_file == 'png':\n", - " fig.savefig(save_path + '/' + dispersion_test + '_' + channel + '.png', dpi=300, trasnparent = True, bbox_extra_artists=(lgd1, lgd2), bbox_inches='tight' )\n", - "\n", - " # Show plots \n", - " 
plot.show()\n", - " display(HTML('
'))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "### Summary" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "tags": [ - "show_only_output" - ] - }, - "outputs": [], - "source": [ - "summary_df = pd.DataFrame(index = list_channels_plots)\n", - "\n", - "for item in dispersion_history:\n", - " summary_df.loc[item[0], 'Dispersion'] = item[1]\n", - " if item[0] != 'BATT':\n", - " summary_df.loc[item[0], 'Total Number of devices'] = len(list_devices)\n", - " summary_df.loc[item[0], 'TBR Number of devices'] = len(dict_devices_tbr[item[0]])\n", - " summary_df.loc[item[0], 'OK Number of devices'] = len(list_devices) - len(dict_devices_tbr[item[0]])\n", - " \n", - "display (summary_df)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - }, - "toc": { - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - }, - "toc-showtags": false - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/todo/geo_data.ipynb b/examples/notebooks/todo/geo_data.ipynb deleted file mode 100755 index 3fa01aa4..00000000 --- a/examples/notebooks/todo/geo_data.ipynb +++ /dev/null @@ -1,361 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Geo data example\n", - "\n", - "This example uses the Smart Citizen API python wrapper to make an animation of the SCK history in the world. 
It uses folium (a leaflet.js implementation)" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "Collapsed": "false" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loading blueprints file from: /Users/macoscar/Documents/04_Projects/02_FabLab/01_SmartCitizen/01_Repositories/DataAnalysis/smartcitizen-data/data/interim/blueprints.yaml\n", - "Loaded blueprints file\n", - "\u001b[32m[SUCCESS]: \u001b[0mMerged blueprints\n", - "\u001b[32m[SUCCESS]: \u001b[0mSaved updated sensor names and dumped into /Users/macoscar/Documents/04_Projects/02_FabLab/01_SmartCitizen/01_Repositories/DataAnalysis/smartcitizen-data/data/interim/sensornames_21.json\n", - "\u001b[32m[SUCCESS]: \u001b[0mSaved updated sensor names and dumped into /Users/macoscar/Documents/04_Projects/02_FabLab/01_SmartCitizen/01_Repositories/DataAnalysis/smartcitizen-data/data/interim/sensornames_20.json\n", - "Updating blueprints file from: /Users/macoscar/Documents/04_Projects/02_FabLab/01_SmartCitizen/01_Repositories/DataAnalysis/smartcitizen-data/data/interim/blueprints.yaml\n", - "\u001b[32m[SUCCESS]: \u001b[0mUpdated blueprints file\n", - "Loading calibration data from: /Users/macoscar/Documents/04_Projects/02_FabLab/01_SmartCitizen/01_Repositories/DataAnalysis/smartcitizen-data/data/interim/calibrations.json\n", - "\u001b[32m[SUCCESS]: \u001b[0mLoaded calibration data file\n" - ] - } - ], - "source": [ - "from src.data.api import ScApiDevice\n", - "import folium\n", - "from folium import plugins\n", - "import pandas as pd\n", - "import branca" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Get data\n", - "\n", - "Use the `ScApiDevice.get_word_map()` to get the whole map from the API" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
[HTML table output removed: stripped-tag residue plus unrelated template fragments; the text/plain block below carries the same DataFrame rows]
" - ], - "text/plain": [ - " added_at city country_code \\\n", - "id \n", - "4365 2017-07-17T14:53:49Z Barcelona ES \n", - "4140 2017-01-12T13:17:54Z Antofagasta CL \n", - "4122 2017-01-05T00:27:58Z Lewisham AU \n", - "3618 2016-07-15T16:26:20Z Esplugues de Llobregat ES \n", - "2451 2015-07-22T12:39:32Z Karlsruhe DE \n", - "\n", - " description kit_id \\\n", - "id \n", - "4365 Making Sense Pilot #1 11 \n", - "4140 Departamento feliz Geomensura - Geomática =) 3 \n", - "4122 Tracking the environment of our farm & food fo... 3 \n", - "3618 None 3 \n", - "2451 Smart Citizen Kit located in Karlsruhe inside ... 3 \n", - "\n", - " last_reading_at latitude longitude name \\\n", - "id \n", - "4365 2017-07-17T14:54:54Z 41.396570 2.194473 Giant Spark Eggnog \n", - "4140 2017-01-14T11:20:24Z -23.701574 -70.420252 UA_F.A \n", - "4122 2017-08-17T08:57:21Z -33.892412 151.147368 Lewisham House \n", - "3618 2017-07-22T15:07:39Z 41.374510 2.100678 ISP01 \n", - "2451 2015-12-23T21:03:45Z 49.006890 8.403653 ZKM - Citizen-KiT \n", - "\n", - " owner_id owner_username state system_tags \\\n", - "id \n", - "4365 6066 Marabales has_published [offline, outdoor] \n", - "4140 5696 sck.fablab.atacama has_published [offline, outdoor] \n", - "4122 2940 jamesr has_published [indoor, offline] \n", - "3618 5452 ISP01 has_published [offline, outdoor] \n", - "2451 3921 ZKM4Karlsruhe has_published [indoor, offline] \n", - "\n", - " updated_at user_tags \n", - "id \n", - "4365 2017-07-17T14:53:49Z [Barcelona, MakingSense, Inside, First Floor, ... \n", - "4140 2017-10-14T14:21:02Z [Research, Experimental] \n", - "4122 2017-01-22T04:51:40Z [Experimental] \n", - "3618 2016-07-15T16:45:37Z [Barcelona] \n", - "2451 2015-10-10T00:09:53Z [] " - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Sensor IDs (you can filter, see help(ScApiDevice.get_world_map))\n", - "sensors = ScApiDevice.get_world_map(full = True)\n", - "sensors.set_index('kit_id')\n", - "sensors.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Plot Static map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.visualization.visualization_tools import device_history_map\n", - "\n", - "options = {'fillOpacity':1.0, 'radius': 10, 'zoom': 3.5}\n", - "m = device_history_map(map_type = 'static', dataframe = sensors, options = options)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Save it or show it\n", - "m.save('/path/map.html')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Plot Dynamic Map" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "from src.visualization.visualization_tools import device_history_map\n", - "\n", - "options = {'fillOpacity':0.7, 'radius': 10, 'zoom': 2.5, 'period': '1W'}\n", - "m = device_history_map(map_type = 'dynamic', dataframe = sensors, options = options)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "# Save it or show it\n", - "m.save('/Users/macoscar/Desktop/IAAC-Article/kit_history.html')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" 
- }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/todo/model_creation.ipynb b/examples/notebooks/todo/model_creation.ipynb deleted file mode 100644 index 07cf9197..00000000 --- a/examples/notebooks/todo/model_creation.ipynb +++ /dev/null @@ -1,278 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "toc-hr-collapsed": false - }, - "source": [ - "# Sensor Calibration\n", - "\n", - "This example will walk you through the model creation using two different methods" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.data.data import *\n", - "data = data_wrapper()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "## Load data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "# Set these below or use defaults from config.yaml \n", - "#options = {'clean_na': False, 'clean_na_method': 'drop', 'frequency': '3Min', 'load_cached_API': True, 'store_cached_API': True}\n", - "options = {'clean_na': True, 'clean_na_method': 'drop'}\n", - "testname = \"2019-03_EXT_UCD_URBAN_BACKGROUND_API\"\n", - "data.load_test(testname, options)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "## Create a Linear Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.models.model import model_wrapper\n", - "\n", - "# Input model description\n", - "model_description_ols = {\"model_name\": \"OLS_UCD\",\n", - " \"model_type\": \"OLS\",\n", - " \"model_target\": \"ALPHASENSE\",\n", - " \"data\": {\"train\": {\"2019-03_EXT_UCD_URBAN_BACKGROUND_API\": {\"devices\": [\"5262\"],\n", - " \"reference_device\": \"CITY_COUNCIL\"}},\n", - " \"test\": {\"2019-03_EXT_UCD_URBAN_BACKGROUND_API\": {\"devices\": [\"5565\"],\n", - " \"reference_device\": \"CITY_COUNCIL\"}},\n", - " \"features\": {\"REF\": \"NO2_CONV\",\n", - " \"A\": \"GB_2W\",\n", - " \"B\": \"GB_2A\",\n", - " \"C\": \"HUM\"},\n", - " \"data_options\": {\"frequency\": '1Min',\n", - " \"clean_na\": True,\n", - " \"clean_na_method\": \"drop\",\n", - " \"min_date\": None,\n", - " \"frequency\": \"1Min\",\n", - " \"max_date\": '2019-01-15'},\n", - " },\n", - " \"hyperparameters\": {\"ratio_train\": 0.75},\n", - " \"model_options\": {\"session_active_model\": True,\n", - " \"show_plots\": True,\n", - " \"export_model\": False,\n", - "\t\t\t\t\t\t\t\t\"export_model_file\": False,\n", - "\t\t\t\t\t\t\t\t\"extract_metrics\": True}\n", - " }\n", - "\n", - "# --- \n", - "# Init rf model\n", - "ols_model = model_wrapper(model_description_ols, verbose = True)\n", - "\n", - "# Prepare dataframe for modeling\n", - "test_name = data.prepare_dataframe_model(ols_model)\n", - " \n", - "# Train Model based on training dataset\n", - "ols_model.train()\n", - "\n", - "# Get prediction for train device\n", - "device = ols_model.data['train'][test_name]['devices'][0]\n", - "prediction_name = device + '_' + ols_model.name\n", - "prediction = 
ols_model.predict(data.tests[test_name].devices[device].readings, prediction_name)\n", - "# Combine it in readings\n", - "data.tests[test_name].devices[device].readings.combine_first(prediction)\n", - "\n", - "# Archive model\n", - "if ols_model.options['session_active_model']:\n", - " data.archive_model(ols_model)\n", - "\n", - "# Print metrics\n", - "if ols_model.options['extract_metrics']:\n", - " metrics_model_ols = ols_model.extract_metrics('train')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "toc-hr-collapsed": false - }, - "source": [ - "## Create a Random Forest" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.models.model import model_wrapper\n", - "\n", - "# Input model description\n", - "model_description_rf = {\"model_name\": \"RF_UCD\",\n", - " \"model_type\": \"RF\",\n", - " \"model_target\": \"ALPHASENSE\",\n", - " \"data\": {\"train\": {\"2019-03_EXT_UCD_URBAN_BACKGROUND_API\": {\"devices\": [\"5262\"],\n", - " \"reference_device\": \"CITY_COUNCIL\"}},\n", - " \"test\": {\"2019-03_EXT_UCD_URBAN_BACKGROUND_API\": {\"devices\": [\"5565\"],\n", - " \"reference_device\": \"CITY_COUNCIL\"}},\n", - " \"features\": {\"REF\": \"NO2_CONV\",\n", - " \"A\": \"GB_2W\",\n", - " \"B\": \"GB_2A\",\n", - " \"C\": \"HUM\"},\n", - " \"data_options\": {\"target_raster\": '1Min',\n", - " \"clean_na\": True,\n", - " \"clean_na_method\": \"drop\",\n", - " \"min_date\": None,\n", - " \"frequency\": \"1Min\",\n", - " \"max_date\": '2019-01-15'},\n", - " },\n", - " \"hyperparameters\": {\"ratio_train\": 0.75, \n", - " \"min_samples_leaf\": 2,\n", - " \"max_features\": None,\n", - " \"n_estimators\": 100,\n", - "\t\t\t\t\t\t\t\t\t\t\"shuffle_split\": True},\n", - " \"model_options\": {\"session_active_model\": True,\n", - " \"show_plots\": True,\n", - " \"export_model\": False,\n", - "\t\t\t\t\t\t\t\t\"export_model_file\": False,\n", - "\t\t\t\t\t\t\t\t\"extract_metrics\": True}\n", - " }\n", - "\n", - "# --- \n", - "# Init rf model\n", - "rf_model = model_wrapper(model_description_rf, verbose = True)\n", - "\n", - "# Prepare dataframe for modeling\n", - "test_name = data.prepare_dataframe_model(rf_model)\n", - " \n", - "# Train Model based on training dataset\n", - "rf_model.train()\n", - "\n", - "# Get prediction for train device\n", - "device = rf_model.data['train'][test_name]['devices'][0]\n", - "prediction_name = device + '_' + rf_model.name\n", - "prediction = rf_model.predict(data.tests[test_name].devices[device].readings, prediction_name)\n", - "# Combine it in readings\n", - "data.tests[test_name].devices[device].readings.combine_first(prediction)\n", - "\n", - "# Archive model\n", - "if rf_model.options['session_active_model']:\n", - " data.archive_model(rf_model)\n", - "\n", - "# Print metrics\n", - "if rf_model.options['extract_metrics']:\n", - " metrics_model_rf = rf_model.extract_metrics('train')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "## Model comparison" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.visualization.visualization import targetDiagram\n", - "%matplotlib inline\n", - "models = dict()\n", - "\n", - "group = 0\n", - "for model in [ols_model, rf_model]:\n", - " for dataset in ['train', 'validation']:\n", - " if dataset in model.metrics.keys(): \n", - " models[model.name + 
'_' + dataset] = model.metrics[dataset]\n", - " models[model.name + '_' + dataset]['group'] = group\n", - "\n", - "targetDiagram(models, True, 'seaborn-talk')" - ] - } - ], - "metadata": { - "celltoolbar": "Raw Cell Format", - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - }, - "toc": { - "nav_menu": { - "height": "357px", - "width": "307px" - }, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": true, - "toc_position": { - "height": "48px", - "left": "552px", - "top": "705.497px", - "width": "315px" - }, - "toc_section_display": true, - "toc_window_display": true - }, - "toc-autonumbering": false, - "toc-showcode": false, - "toc-showmarkdowntxt": false, - "toc-showtags": false - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/todo/stream_sck.ipynb b/examples/notebooks/todo/stream_sck.ipynb deleted file mode 100755 index 7a5430f4..00000000 --- a/examples/notebooks/todo/stream_sck.ipynb +++ /dev/null @@ -1,286 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "Set things up" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.tools.sck import sck\n", - "from src.saf import *\n", - "import time\n", - "from bokeh.models.sources import ColumnDataSource\n", - "from bokeh.plotting import figure\n", - "from bokeh.io import output_notebook, show, push_notebook\n", - "from threading import Thread\n", - "\n", - "output_notebook()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "Make the kit" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "kit = sck()\n", - "kit.begin()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "Some things we can know about this kit" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "print (kit.sensor_enabled)\n", - "print (kit.serialPort)\n", - "print (kit.serialPort_name)\n", - "print (kit.sam_firmVer)\n", - "print (kit.sam_firmCommit)\n", - "print (kit.sam_firmBuildDate)\n", - "print (kit.esp_macAddress)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "We can set shell mode for easier interaction" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "kit.toggleShell()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "Here we can define what channels we would like to see, and some processing for them.\n", - "
Each channel can have several processing to do:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "channels_to_monitor = {'Temperature': {'smooth': 10, \n", - " 'time_derivative': 1},\n", - " 'Humidity': {'time_derivative': None}\n", - " }" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "store_to_csv = True\n", - "if store_to_csv: path_to_store = join(getcwd(), 'csv_export.csv'); print (path_to_store)\n", - "# And start monitoring them\n", - "kit.monitor(list(channels_to_monitor.keys()), noms = True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false" - }, - "source": [ - "### Time series plot" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "def process_data(data):\n", - " for channel in channels_to_monitor.keys():\n", - " for process in channels_to_monitor[channel].keys():\n", - " if data.empty: data[channel + '_' + process] = []\n", - " else:\n", - " formula = process + f\"(new_data['{channel}'], channels_to_monitor['{channel}']['{process}'])\"\n", - " data[channel + '_' + process] = eval(formula)\n", - " return data\n", - "\n", - "test_data = ColumnDataSource(data = process_data(kit.worker.example))\n", - "\n", - "from bokeh.models import Panel, Tabs\n", - "from bokeh.layouts import column\n", - "from bokeh.plotting import figure\n", - "from bokeh.io import show\n", - "from bokeh.layouts import gridplot\n", - "\n", - "n_tabs = len(list(channels_to_monitor.keys()))\n", - "tabs = Tabs(tabs = [])\n", - "\n", - "for channel in channels_to_monitor.keys():\n", - " gridplots = list()\n", - " p = figure(background_fill_color=\"#fafafa\", x_axis_type='datetime')\n", - " list_channels = [channel]\n", - " \n", - " if 'smooth' in channels_to_monitor[channel].keys(): list_channels.append(channel + '_' + 'smooth')\n", - " print (list_channels)\n", - " p.scatter(y=list_channels[0], x=\"Time\", source = test_data)\n", - " gridplots.append([p])\n", - " print (gridplots)\n", - " for process in channels_to_monitor[channel].keys():\n", - " if process != 'smooth':\n", - " p = figure(background_fill_color=\"#fafafa\", x_axis_type='datetime')\n", - " print (channel + '_' + process)\n", - " p.scatter(y=channel + '_' + process, x=\"Time\", source = test_data)\n", - " gridplots.append([p])\n", - " \n", - " grid = gridplot(gridplots, plot_width=600, plot_height=400)\n", - " tab = Panel(child=grid, title=channel)\n", - " tabs.tabs.append(tab)\n", - "\n", - "handle = show(tabs, notebook_handle=True)\n", - " \n", - "stop_threads = False\n", - "\n", - "def worker_call(id, stop):\n", - " \n", - " period = .1 # in seconds (simulate waiting for new data)\n", - " n_show = 10 # number of points to keep and show\n", - "\n", - " while True:\n", - " if not kit.worker.output.empty():\n", - " new_data = kit.worker.output.get()\n", - " if 'Time' in new_data.columns: new_data.set_index('Time')\n", - " \n", - " # Add processing\n", - " process_data (new_data)\n", - "\n", - " test_data.stream(new_data, n_show)\n", - " # Store to csv\n", - " if store_to_csv: \n", - " dataframe = pd.DataFrame(test_data.data)\n", - " dataframe.to_csv(path_to_store, sep=\",\")\n", - " \n", - " # Update plot\n", - " push_notebook(handle=handle)\n", - " time.sleep(period)\n", - "\n", - " if stop():\n", - " print(\"Finished thread\")\n", - " break\n", - 
"\n", - "thread = Thread(target=worker_call, args=(id, lambda: stop_threads))\n", - "thread.start()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "stop_threads = True" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - }, - "toc": { - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/todo/stream_serial_device.ipynb b/examples/notebooks/todo/stream_serial_device.ipynb deleted file mode 100644 index 939c14a2..00000000 --- a/examples/notebooks/todo/stream_serial_device.ipynb +++ /dev/null @@ -1,238 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "from src.tools.serialtools.serialdevice import *\n", - "from src.saf import *\n", - "from threading import Thread\n", - "import itertools \n", - "# Bokeh plotting tools\n", - "from bokeh.palettes import Dark2_5 as palette\n", - "from bokeh.models.sources import ColumnDataSource\n", - "from bokeh.plotting import figure\n", - "from bokeh.io import output_notebook, show, push_notebook\n", - "from bokeh.models import Panel, Tabs\n", - "from bokeh.layouts import column, gridplot\n", - "\n", - "output_notebook()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "esp = serialdevice()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "if esp.set_serial(): esp.update_serial()\n", - "print (f'Device serial number: {esp.serialNumber}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "# Self-explanatory\n", - "store_to_csv = True\n", - "raster = 0.05\n", - "buffer_length = 5\n", - "# Number of points to show\n", - "n_show = 500\n", - "# Set channels to monitor and calculate \n", - "channels_to_monitor = {'y': {'1': {'clean_na': ['fill', 'inplace']},#,\n", - " '2': {'smooth': [3, 'same', 'inplace']},\n", - " #'8': {'clean_na': ['drop', 'other']},\n", - " '3': {'exponential_smoothing': [0.2, 'same']},\n", - " '4': {'exponential_smoothing': [0.1, 'same']},\n", - " '5': {'exponential_smoothing': [0.05, 'same']},\n", - " '6': {'exponential_smoothing': [0.03, 'same']},\n", - " '7': {'exponential_smoothing': [0.02, 'same']},\n", - " '8': {'exponential_smoothing': [0.01, 'same']},\n", - " '9': {'time_derivative': [1, 'same']},\n", - " '10': {'time_diff': [1, 'other']}\n", - " }}\n", - "\n", - "if store_to_csv: path_to_store = join(getcwd(), 'csv_export.csv'); print (f'Saving stream to: {path_to_store}')" - ] - }, 
- { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "def process_data(data):\n", - " data = data.apply(pd.to_numeric, errors='coerce')\n", - " for channel in channels_to_monitor.keys():\n", - " for process_number in channels_to_monitor[channel].keys():\n", - " # Process and formula\n", - " process = list(channels_to_monitor[channel][process_number])[0]\n", - " formula = process + f\"(data['{channel}'], channels_to_monitor['{channel}']['{process_number}']['{process}'][0])\" \n", - " # Name for new channel depending on inplace or not\n", - " if 'inplace' in channels_to_monitor[channel][process_number][process]: channel_new_name = channel\n", - " else: channel_new_name = channel + '_' + process + '_' + str(channels_to_monitor[channel][process_number][process][0])\n", - " # Calculate\n", - " if data.empty: data[channel_new_name] = []\n", - " else: data[channel_new_name] = eval(formula)\n", - " return data\n", - "\n", - "# Start the stream\n", - "esp.start_streaming(buffer_length = buffer_length, raster = raster)\n", - "# Create plot columnar data\n", - "plot_data = ColumnDataSource(data = process_data(esp.worker.example))\n", - "# Number of tabs\n", - "n_tabs = len(list(channels_to_monitor.keys()))\n", - "tabs = Tabs(tabs = [])\n", - "colors = itertools.cycle(palette)\n", - "\n", - "for channel in channels_to_monitor.keys():\n", - " gridplots = list()\n", - " p = figure(background_fill_color=\"#fafafa\", x_axis_type='datetime')\n", - " gridplots.append([p])\n", - " p.line(y = channel, x=\"index\", source = plot_data, legend_label = channel)\n", - " p.title.text = f'Streaming {channel}'\n", - " p.yaxis.axis_label = f'{channel}'\n", - " p.xaxis.axis_label = 'Timestamp'\n", - "\n", - " for process_number in channels_to_monitor[channel].keys():\n", - " process = list(channels_to_monitor[channel][process_number])[0]\n", - " # We have already plotted it if it was inplace\n", - " if 'inplace' in channels_to_monitor[channel][process_number][process]: continue\n", - "\n", - " channel_name = channel + '_' + process + '_' + str(channels_to_monitor[channel][process_number][process][0])\n", - " if 'same' in channels_to_monitor[channel][process_number][process]:\n", - " p.line(y=channel_name, x=\"index\", legend_label = channel_name, source = plot_data, color = next(colors))\n", - " elif 'other' in channels_to_monitor[channel][process_number][process]:\n", - " p = figure(background_fill_color=\"#fafafa\", x_axis_type='datetime')\n", - " p.line(y=channel_name, x=\"index\", legend_label = channel_name, source = plot_data, color = next(colors))\n", - " p.yaxis.axis_label = f'{channel_name}'\n", - " p.xaxis.axis_label = 'Timestamp'\n", - " gridplots.append([p])\n", - "\n", - " p.legend.location='top_left'\n", - " p.legend.click_policy=\"hide\"\n", - "\n", - " grid = gridplot(gridplots, plot_width=1000, plot_height=500)\n", - " tab = Panel(child=grid, title=channel)\n", - " tabs.tabs.append(tab)\n", - "\n", - "handle = show(tabs, notebook_handle=True)\n", - "stop_threads = False\n", - "\n", - "def worker_call(id, device, stop):\n", - " df_data = pd.DataFrame()\n", - " \n", - " while True:\n", - " if not device.worker.output.empty():\n", - " new_data = device.worker.output.get()\n", - " if 'Time' in new_data.columns: new_data.rename(columns={'Time': 'index'}, inplace=True)\n", - " new_data = new_data.set_index('index')\n", - "\n", - " if df_data.empty: df_data = new_data\n", - " else: df_data = pd.concat([df_data, new_data], sort = 
False)\n", - " \n", - " # We process everything\n", - " # processed_data = process_data(new_data) \n", - " # if df_data.empty: df_data = processed_data\n", - " #else: df_data = pd.concat([df_data, processed_data], sort = False)\n", - " \n", - " # We only process what we show\n", - " processed_data = process_data(df_data.tail(n_show))\n", - " # Stream and processing\n", - " plot_data.stream(processed_data, n_show)\n", - " \n", - " # Store to csv\n", - " if store_to_csv: df_data.to_csv(path_to_store, sep = \",\")\n", - " \n", - " # Update plot\n", - " push_notebook(handle = handle)\n", - "\n", - " if stop(): print(\"Finished thread\"); break\n", - "\n", - "thread = Thread(target=worker_call, args=(id, esp, lambda: stop_threads))\n", - "thread.start()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [ - "stop_threads = True\n", - "if esp.worker.is_alive():\n", - " print ('Terminating device worker')\n", - " esp.worker.terminate()\n", - " esp.worker.join()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false" - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/notebooks/wip-load-process-test.ipynb b/examples/notebooks/wip-load-process-test.ipynb deleted file mode 100644 index 2d56839e..00000000 --- a/examples/notebooks/wip-load-process-test.ipynb +++ /dev/null @@ -1,371 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import scdata as sc\n", - "from scdata._config import config" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "config._out_level = 'DEBUG'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device = sc.Device(descriptor = {'source': 'api', 'id': '13238'})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "device.load(only_unprocessed = False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.process()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.readings['EXT_PM_A_1'].plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.post_metrics(dry_run=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.update_postprocessing()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.readings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sensors = 
list(a.columns)\n", - "sensors.__delitem__(0)\n", - "\n", - "for sensor in sensors: a.__delitem__(sensor)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a['id'] = 28" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sensor = 'BATT'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.index.name = 'recorded_at'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.rename(columns = {sensor: 'value'}, inplace=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.columns = pd.MultiIndex.from_product([['sensors'], a.columns])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a['sensors'][['value', 'id']].to_dict('r')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "j = (a.groupby('recorded_at', as_index = True)\n", - " .apply(lambda x: x['sensors'][['value', 'id']].to_dict('r'))\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "j.name = 'sensors'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "device.readings.head(4)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "post_data = {\"data\":[]}\n", - "from scdata.utils import localise_date\n", - "for item in j.index:\n", - " post_data[\"data\"].append(\n", - " {\n", - " \"recorded_at\": localise_date(item, 'UTC').strftime('%Y-%m-%dT%H:%M:%SZ'),\n", - " \"sensors\": j[item]\n", - " }\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "for item in post_data['data']: print (item)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "{\n", - " \"data\": [{\n", - " \"recorded_at\": \"2016-06-08 10:30:00\",\n", - " \"sensors\": [{\n", - " \"id\": 22,\n", - " \"value\": 21\n", - " }]\n", - " }]\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (json.dumps(json.loads(a.to_json(orient = 'records')), indent=2, sort_keys=True))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "j = (a.groupby(['recorded_at'], as_index = True)\n", - " .apply(lambda x: x[['id', sensor]].to_dict('r'))\n", - " )\n", - " # .reset_index()\n", - " # .rename(columns={0: 'sensors'})\n", - " # .to_json(orient = 'records'))\n", - "k = {\"data\": j.to_json(orient = 'index', date_format = 'iso')}" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "k" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "help(pd.DataFrame.to_json)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "k" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (json.dumps(json.loads(k), indent=2, sort_keys=True))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/scripts/change_kit_id.py b/examples/scripts/change_kit_id.py deleted file mode 100644 index 2956146f..00000000 --- a/examples/scripts/change_kit_id.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/python - -import scdata as sc -from scdata._config import config - -config._out_level = 'DEBUG' - -# Device id needs to be as str -device = sc.Device(descriptor = {'source': 'api', 'id': '4742'}) - -device.api_device.get_device_location() -device.api_device.get_device_sensors() -initial_kit_ID = device.api_device.get_kit_ID() - -print ('Initial Kit ID') -print(device.api_device.kit_id) - -# Check this script posts the data -device.api_device.kit_id = 29 - -device.api_device.post_kit_ID() -print (f'Kit ID after making it be {device.api_device.kit_id}') -print (device.api_device.get_kit_ID(update = True)) - -device.api_device.kit_id = initial_kit_ID -device.api_device.post_kit_ID() -check = device.api_device.get_kit_ID(update = True) -print ('Initial Kit ID (again, after reverting the last change)') -print(check) diff --git a/examples/scripts/get_device_data.py b/examples/scripts/get_device_data.py index e08f0e9a..d85f8885 100644 --- a/examples/scripts/get_device_data.py +++ b/examples/scripts/get_device_data.py @@ -1,17 +1,17 @@ #!/usr/bin/python -from scdata.io.device_api import ScApiDevice +from smartcitizen_connector import SCDevice from scdata._config import config # Set verbose level -config._out_level = 'DEBUG' +config.log_level = 'DEBUG' # Device id needs to be as str -device = ScApiDevice('10972') -device.get_device_lat_long() -device.get_device_sensors() +device = SCDevice(10972) +device.options.min_date = None #Don't trim min_date +device.options.max_date = None #Don't trim max_date # Load -data = device.get_device_data(min_date = None, max_date = None, frequency = '1Min', clean_na = None); +await data = device.g(min_date = None, max_date = None, frequency = '1Min', clean_na = None); print (data) diff --git a/examples/scripts/get_device_macs.py b/examples/scripts/get_device_macs.py deleted file mode 100644 index 8cc39f1d..00000000 --- a/examples/scripts/get_device_macs.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/python - -from scdata.io.device_api import ScApiDevice - -print ('Getting device 10972') -device = ScApiDevice('10972') -print (device.get_device_location()) -print (device.get_mac()) - -print 
('Getting devices in Barcelona') -wm = ScApiDevice.get_world_map(city = 'Barcelona', max_date = '2020-05-01') -print ('World map get successful') - -for kit in wm: - device = ScApiDevice(kit) - mac = device.get_mac() - if mac: - print (kit, mac) \ No newline at end of file diff --git a/examples/scripts/process_data.py b/examples/scripts/process_data.py deleted file mode 100644 index d87d3bb7..00000000 --- a/examples/scripts/process_data.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/python - -import scdata as sc - -device = sc.Device(blueprint = 'sc_21_station_box', descriptor = {'source': 'api', 'id': '10751'}) - -device.api_device.post_info = dict() -device.api_device.post_info['hardware_id'] = "SCTEST" - -device.__fill_metrics__() -device.load() -device.process() \ No newline at end of file From c3a53c70fe535ac9b7be6d87799ea3e02fa10b4c Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:03:23 +0200 Subject: [PATCH 52/72] Remove connectors --- connectors/README.md | 23 ------ connectors/nilu.json | 183 ------------------------------------------- 2 files changed, 206 deletions(-) delete mode 100644 connectors/README.md delete mode 100644 connectors/nilu.json diff --git a/connectors/README.md b/connectors/README.md deleted file mode 100644 index 5a955581..00000000 --- a/connectors/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Data connectors - -This folder specifies data connectors with other APIs that might interact in some way with the Smart Citizen API. Flexible json format handled by the `configure` method provided by the ApiDevice Handler. - -## Currently implemented connections - -- [nilu](https://iflink.nilu.no/en/home/): https://sensors.nilu.no/api/doc - -## Definition - -``` -{ - "handler": "NiluApiDevice", # Handler class in device_api.py - "name_prepend": "SmartCitizen_", # This will be the name in the target API - SmartCitizen_{id} - "sensors": { - "14": { # SC ID (see https://api.smartcitizen.me/v0/sensors/?per_page=200) - "id": 56, # target ID (for NILU see https://sensors.nilu.no/api/components) - "unitid": 28, # target unit ID - "level": 1 # target level ID - }, - ... 
-}
-```
\ No newline at end of file
diff --git a/connectors/nilu.json b/connectors/nilu.json
deleted file mode 100644
index 74b47057..00000000
--- a/connectors/nilu.json
+++ /dev/null
@@ -1,183 +0,0 @@
-{
-    "handler": "NiluApiDevice",
-    "name_prepend": "SmartCitizen_",
-    "sensors": {
-        "14": {
-            "id": 56,
-            "unitid": 28,
-            "level": 1
-        },
-        "53": {
-            "id": 57,
-            "unitid": 30,
-            "level": 1
-        },
-        "55": {
-            "id": 15,
-            "unitid": 2,
-            "level": 1
-        },
-        "56": {
-            "id": 5,
-            "unitid": 1,
-            "level": 1
-        },
-        "58": {
-            "id": 1,
-            "unitid": 9,
-            "level": 1
-        },
-        "10": {
-            "id": 60,
-            "unitid": 32,
-            "level": 1
-        },
-        "87": {
-            "id": 13,
-            "unitid": 3,
-            "level": 1
-        },
-        "89": {
-            "id": 11,
-            "unitid": 3,
-            "level": 1
-        },
-        "88": {
-            "id": 12,
-            "unitid": 3,
-            "level": 1
-        },
-        "112": {
-            "id": 55,
-            "unitid": 17,
-            "level": 1
-        },
-        "113": {
-            "id": 54,
-            "unitid": 16,
-            "level": 1
-        },
-        "115": {
-            "id": 13,
-            "unitid": 3,
-            "level": 1
-        },
-        "116": {
-            "id": 12,
-            "unitid": 3,
-            "level": 1
-        },
-        "117": {
-            "id": 11,
-            "unitid": 3,
-            "level": 1
-        },
-        "152": {
-            "id": 2,
-            "unitid": 16,
-            "level": 2
-        },
-        "153": {
-            "id": 8,
-            "unitid": 16,
-            "level": 2
-        },
-        "154": {
-            "id": 7,
-            "unitid": 16,
-            "level": 2
-        },
-        "155": {
-            "id": 58,
-            "unitid": 16,
-            "level": 2
-        },
-        "156": {
-            "id": 59,
-            "unitid": 16,
-            "level": 2
-        },
-        "157": {
-            "id": 10,
-            "unitid": 16,
-            "level": 2
-        },
-        "158": {
-            "id": 3,
-            "unitid": 17,
-            "level": 1
-        },
-        "X001": {
-            "id": 61,
-            "unitid": 34,
-            "level": 1
-        },
-        "X002": {
-            "id": 62,
-            "unitid": 34,
-            "level": 1
-        },
-        "X003": {
-            "id": 63,
-            "unitid": 34,
-            "level": 1
-        },
-        "X004": {
-            "id": 64,
-            "unitid": 34,
-            "level": 1
-        },
-        "X005": {
-            "id": 65,
-            "unitid": 34,
-            "level": 1
-        },
-        "X006": {
-            "id": 66,
-            "unitid": 34,
-            "level": 1
-        },
-        "X007": {
-            "id": 67,
-            "unitid": 34,
-            "level": 1
-        },
-        "X008": {
-            "id": 68,
-            "unitid": 34,
-            "level": 1
-        },
-        "X009": {
-            "id": 69,
-            "unitid": 34,
-            "level": 1
-        },
-        "X010": {
-            "id": 70,
-            "unitid": 34,
-            "level": 1
-        },
-        "X011": {
-            "id": 71,
-            "unitid": 34,
-            "level": 1
-        },
-        "X012": {
-            "id": 72,
-            "unitid": 34,
-            "level": 1
-        },
-        "X013": {
-            "id": 73,
-            "unitid": 2,
-            "level": 1
-        }
-    },
-    "kwargs": {
-        "description": "same_as_api",
-        "frequency": "options",
-        "epsg": "config",
-        "enabled": true,
-        "sensors": "same_as_device"
-    }
-}
\ No newline at end of file

From 3f7d54f578acd9e2d24cf2cbf71934f38527c1da Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Fri, 3 May 2024 13:09:47 +0200
Subject: [PATCH 53/72] Allow any kind of param to be passed to device

---
 scdata/device/device.py | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/scdata/device/device.py b/scdata/device/device.py
index 352b4047..4023a337 100644
--- a/scdata/device/device.py
+++ b/scdata/device/device.py
@@ -136,7 +136,6 @@ def __set_handler__(self):
             # TODO Add handler here
             raise NotImplementedError('No handler for stream yet')
 
-        # TODO - Fix to be able to pass other things that are not IDs
         if self.hclass is not None:
             self.handler = self.hclass(params = self.paramsParsed)
         else:
@@ -229,7 +228,7 @@ def add_metric(self, metric = dict()):
         '''
 
        if 'metrics' not in vars(self):
-            logger.info(f'Device {self.params.id} has no metrics yet. Adding')
+            logger.info(f'Device {self.paramsParsed.id} has no metrics yet. 
Adding') self.metrics = list() _metric = TypeAdapter(Metric).validate_python(metric) @@ -431,11 +430,11 @@ def process(self, only_new=False, lmetrics=None): self.postprocessing_updated = False if 'metrics' not in vars(self): - logger.warning(f'Device {self.params.id} has nothing to process. Skipping') + logger.warning(f'Device {self.paramsParsed.id} has nothing to process. Skipping') return process_ok logger.info('---------------------------') - logger.info(f'Processing device {self.params.id}') + logger.info(f'Processing device {self.paramsParsed.id}') if lmetrics is None: _lmetrics = [metric.name for metric in self.metrics] else: _lmetrics = lmetrics @@ -479,7 +478,7 @@ def process(self, only_new=False, lmetrics=None): # If the metric is None, might be for many reasons and shouldn't collapse the process_ok if process_ok: - logger.info(f"Device {self.params.id} processed") + logger.info(f"Device {self.paramsParsed.id} processed") self.processed = process_ok & self.update_postprocessing_date() return self.processed @@ -658,8 +657,8 @@ async def post(self, columns = 'sensors', clean_na = 'drop', chunk_size = 500,\ rename = self._rename, clean_na = clean_na, chunk_size = chunk_size, \ dry_run = dry_run, max_retries = max_retries) - if post_ok: logger.info(f'Posted data for {self.params.id}') - else: logger.error(f'Error posting data for {self.params.id}') + if post_ok: logger.info(f'Posted data for {self.paramsParsed.id}') + else: logger.error(f'Error posting data for {self.paramsParsed.id}') # Post info if requested. It should be updated elsewhere if with_postprocessing and post_ok and not dry_run: @@ -686,5 +685,5 @@ def update_postprocessing(self, dry_run = False): post_ok = self.handler.patch_postprocessing(dry_run=dry_run) - if post_ok: logger.info(f"Postprocessing posted for device {self.params.id}") + if post_ok: logger.info(f"Postprocessing posted for device {self.paramsParsed.id}") return post_ok From 4947a2b25016724c4c1b2c77c99f219f02c30b4b Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:10:08 +0200 Subject: [PATCH 54/72] Remove preprocess of sd-card (commented) --- scdata/test/test.py | 124 ++++++++++++++++++++++---------------------- 1 file changed, 63 insertions(+), 61 deletions(-) diff --git a/scdata/test/test.py b/scdata/test/test.py index a9f9ce27..3d2dc3a9 100644 --- a/scdata/test/test.py +++ b/scdata/test/test.py @@ -19,7 +19,7 @@ from scdata.io import read_csv_file, export_csv_file from scdata._config import config from scdata import Device -from scdata.models import TestOptions#, TestResult +from scdata.models import TestOptions class Test(BaseModel): @@ -148,7 +148,8 @@ def create(self): else: logger.info (f'Overwriting test. Full name: {self.name}') - self.__preprocess__() + # TODO Remove + # self.__preprocess__() self.__dump__() logger.info (f'Test creation finished. 
Name: {self.name}') @@ -192,79 +193,80 @@ def process(self, only_new = False): return process_ok - # TODO - CHECK FOR CSV FILES - def __preprocess__(self): - ''' - Processes the files for one test, given that the devices and details have been added - ''' + # # TODO - CHECK FOR CSV FILES + # def __preprocess__(self): + # ''' + # Processes the files for one test, given that the devices and details have been added + # ''' - logger.info('Processing files...') - def get_raw_files(): - list_raw_files = [] - for device in self.devices: - if device.source.type == 'csv': - list_raw_files.append(device.source.files.raw_data_file) + # logger.info('Processing files...') + # def get_raw_files(): + # list_raw_files = [] + # for device in self.devices: + # if device.source.type == 'sd-csv': + # list_raw_files.append(device.source.files.raw_data_file) - return list_raw_files + # return list_raw_files - def copy_raw_files(_raw_src_path, _raw_dst_path, _list_raw_files): - try: + # def copy_raw_files(_raw_src_path, _raw_dst_path, _list_raw_files): + # try: - for item in _list_raw_files: - s = join(_raw_src_path, item) - d = join(_raw_dst_path, item.split('/')[-1]) - copyfile(s, d) + # for item in _list_raw_files: + # s = join(_raw_src_path, item) + # d = join(_raw_dst_path, item.split('/')[-1]) + # copyfile(s, d) - logger.info('Copy raw files: OK') + # logger.info('Copy raw files: OK') - return True + # return True - except: - logger.error('Problem copying raw files') - print_exc() - return False + # except: + # logger.error('Problem copying raw files') + # print_exc() + # return False - def date_parser(s, a): - return parser.parse(s).replace(microsecond=int(a[-3:])*1000) + # def date_parser(s, a): + # return parser.parse(s).replace(microsecond=int(a[-3:])*1000) - # Define paths - raw_src_path = join(config.paths['data'], 'raw') - raw_dst_path = join(self.path, 'raw') + # # Define paths + # raw_src_path = join(config.paths['data'], 'raw') + # raw_dst_path = join(self.path, 'raw') - # Create path - if not exists(raw_dst_path): makedirs(raw_dst_path) + # # Create path + # if not exists(raw_dst_path): makedirs(raw_dst_path) # Get raw files - list_raw_files = get_raw_files() + # list_raw_files = get_raw_files() # Copy raw files and process data - if len(list_raw_files): - if copy_raw_files(raw_src_path, raw_dst_path, list_raw_files): - - # Process devices - for device in self.devices: - if device.source.type == 'csv': - - logger.info (f'Processing csv from device {device.id}...') - src_path = join(raw_src_path, device.raw_data_file) - dst_path = join(self.path, device.processed_data_file) - - # Load csv file, only localising and removing - df = read_csv_file(file_path = src_path, - timezone = device.timezone, - frequency = device.frequency, - clean_na = None, - index_name = device.sources[device.source]['index'], - skiprows = device.sources[device.source]['header_skip'], - sep = device.sources[device.source]['sep'], - tzaware = device.sources[device.source]['tz-aware'], - resample = device.resample - ) - df.index.rename(config._csv_defaults['index_name'], inplace=True) - df.to_csv(dst_path, sep=config._csv_defaults['sep']) - - logger.info('Files preprocessed') - logger.info(f'Test {self.name} path: {self.path}') + # if len(list_raw_files): + # if copy_raw_files(raw_src_path, raw_dst_path, list_raw_files): + + # # Process devices + # for device in self.devices: + # ## Make this for CSV devices + # if device.source.type == 'sd-csv': + + # logger.info (f'Processing csv from device {device.id}...') + # src_path = 
join(raw_src_path, device.raw_data_file) + # dst_path = join(self.path, device.processed_data_file) + + # # Load csv file, only localising and removing + # df = read_csv_file(file_path = src_path, + # timezone = device.timezone, + # frequency = device.frequency, + # clean_na = None, + # index_name = device.sources[device.source]['index'], + # skiprows = device.sources[device.source]['header_skip'], + # sep = device.sources[device.source]['sep'], + # tzaware = device.sources[device.source]['tz-aware'], + # resample = device.resample + # ) + # df.index.rename(config._csv_defaults['index_name'], inplace=True) + # df.to_csv(dst_path, sep=config._csv_defaults['sep']) + + # logger.info('Files preprocessed') + # logger.info(f'Test {self.name} path: {self.path}') @model_serializer def ser_model(self) -> Dict[str, Any]: From 4debd706beacbd14eecca2863c711de83dba4e15 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:10:32 +0200 Subject: [PATCH 55/72] Minor lint --- scdata/device/process/alphasense.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scdata/device/process/alphasense.py b/scdata/device/process/alphasense.py index 72cc9782..5c41d7e4 100644 --- a/scdata/device/process/alphasense.py +++ b/scdata/device/process/alphasense.py @@ -173,9 +173,11 @@ def ec_sensor_temp(dataframe, **kwargs): Temperature series """ if 'priority' in kwargs: - if kwargs['priority'] in dataframe.columns: return dataframe[kwargs['priority']] + if kwargs['priority'] in dataframe.columns: + return dataframe[kwargs['priority']] for option in alphasense_temp_channel: - if option in dataframe.columns: return dataframe[option] + if option in dataframe.columns: + return dataframe[option] logger.error('Problem with input data') return None From 9d0c3fa6292351f01c63fa144d3083b2701f5387 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:10:59 +0200 Subject: [PATCH 56/72] WIP - file export --- scdata/test/export/to_file.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scdata/test/export/to_file.py b/scdata/test/export/to_file.py index 26cf0c05..91b407e5 100755 --- a/scdata/test/export/to_file.py +++ b/scdata/test/export/to_file.py @@ -70,6 +70,8 @@ def to_html(self, title = 'Your title here', template = 'sc_template.html', path rendered: flask rendered template ''' + # TODO - Update or remove + raise NotImplementedError # Find the path to the html templates directory template_folder = join(dirname(__file__), 'templates') @@ -80,9 +82,9 @@ def to_html(self, title = 'Your title here', template = 'sc_template.html', path logger.info('Creating folder for test export') makedirs(path) - filename = join(path, f'{self.full_name}.html') + filename = join(path, f'{self.name}.html') - docname = sub('.','_', self.full_name) + docname = sub('.','_', self.name) app = flask.Flask(docname, template_folder = template_folder) with app.app_context(): From a19a8b56da3e2a5e33e713dab69fc0448c20157b Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:11:30 +0200 Subject: [PATCH 57/72] WIP Zenodo upload --- scdata/tools/zenodo.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scdata/tools/zenodo.py b/scdata/tools/zenodo.py index f37bf99c..708c1651 100644 --- a/scdata/tools/zenodo.py +++ b/scdata/tools/zenodo.py @@ -1,7 +1,7 @@ ''' Implementation of zenodo export ''' from scdata._config import config -from scdata.tools import logger, get_tests_log +from scdata.tools.custom_logger import logger from scdata.tools.report import 
include_footer from scdata import Test import json, yaml @@ -37,6 +37,7 @@ def zenodo_upload(upload_descritor, sandbox = True, dry_run = True): True if all data is uploaded, False otherwise ''' + raise NotImplementedError def fill_template(individual_descriptor, descriptor_file_name, upload_type = 'dataset'): # Open base template with all keys @@ -213,7 +214,7 @@ def upload_file(url, upload_metadata, files): files = {'file': open(file_path, 'rb')} file_size = getsize(file_path)/(1024*1024.0*1024) - if file_size > 50: logger.warning(f'File size for {file_name} over 50Gb ({fi 'WARNING') + if file_size > 50: logger.warning(f'File size for {file_name} over 50Gb') if not dry_run: status_code = upload_file(url, upload_metadata, files) else: status_code = 200 From aa05dfc3f1a7d0f53dd73124e2ae90aaf06933f5 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Fri, 3 May 2024 13:11:58 +0200 Subject: [PATCH 58/72] Add MANIFEST tools --- MANIFEST.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 85de688b..5106b64b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,6 @@ include LICENSE include setup.py include setup.cfg include scdata/test/export/templates/sc_template.html -include scdata/utils/interim/*.csv -include scdata/utils/zenodo_templates/*.* -include scdata/utils/uploads/*.* \ No newline at end of file +include scdata/tools/interim/*.csv +include scdata/tools/zenodo_templates/*.* +include scdata/tools/uploads/*.* \ No newline at end of file From a987518586496c87f32c5b62a3983ce4da0bf9ec Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Wed, 8 May 2024 20:47:59 +0200 Subject: [PATCH 59/72] Make sure loaded is updated --- scdata/test/test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scdata/test/test.py b/scdata/test/test.py index 3d2dc3a9..a0c1ed91 100644 --- a/scdata/test/test.py +++ b/scdata/test/test.py @@ -370,4 +370,5 @@ async def load(self): logger.info('Test load done') if self.options.cache: self.cache() - return all([d.loaded for d in self.devices]) \ No newline at end of file + self.loaded = all([d.loaded for d in self.devices]) + return self.loaded \ No newline at end of file From d34a578ab127b3e033500a6572f015f609a2aaa3 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Wed, 8 May 2024 20:48:35 +0200 Subject: [PATCH 60/72] Avoid making a wrong process status if no postprocessing --- scdata/device/device.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scdata/device/device.py b/scdata/device/device.py index 4023a337..68ad4567 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -491,7 +491,8 @@ def update_postprocessing_date(self): latest_postprocessing = localise_date(self.data.index[-1]+\ to_timedelta(self.options.frequency), 'UTC') if self.handler.update_latest_postprocessing(latest_postprocessing): - if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing: + # Consider the case of no postprocessing, to avoid making the whole thing false + if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing or self.json.postprocessing is None: self.postprocessing_updated = True else: self.postprocessing_updated = False From 1efcb7937e7ff3be69e8ac78ac55a05018b55f64 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Wed, 8 May 2024 20:56:03 +0200 Subject: [PATCH 61/72] Fix requirements and tests running issues --- .../python-app.yml => python-latest.yml} | 33 +---------- .../workflows/python-multiple-versions.yml | 43 ++++++++++++++ 
.github/workflows/python-publish.yml | 2 +- requirements.txt | 28 ++++++++++ scdata/_config/config.py | 4 +- scdata/device/device.py | 2 +- scdata/device/process/alphasense.py | 2 +- scdata/test/plot/__init__.py | 4 +- scdata/test/test.py | 7 ++- setup.py | 26 +-------- tests/all/devices/test_csv_device.py | 40 +++++++++++++ tests/all/devices/test_sc_device.py | 34 +++++++++++ tests/all/test/test_sc_test.py | 56 +++++++++++++++++++ .../devices/test/test_sc_test.py | 40 +++++++++++++ .../cross-version/devices/test_csv_device.py | 40 +++++++++++++ tests/cross-version/devices/test_sc_device.py | 34 +++++++++++ tests/cross-version/test/test_sc_test.py | 40 +++++++++++++ tests/devices/test_sc_device.py | 28 ---------- tests/test/test_sc_test.py | 31 ---------- 19 files changed, 372 insertions(+), 122 deletions(-) rename .github/{workflows/python-app.yml => python-latest.yml} (61%) create mode 100644 .github/workflows/python-multiple-versions.yml create mode 100644 requirements.txt create mode 100644 tests/all/devices/test_csv_device.py create mode 100644 tests/all/devices/test_sc_device.py create mode 100644 tests/all/test/test_sc_test.py create mode 100644 tests/cross-version/devices/test/test_sc_test.py create mode 100644 tests/cross-version/devices/test_csv_device.py create mode 100644 tests/cross-version/devices/test_sc_device.py create mode 100644 tests/cross-version/test/test_sc_test.py delete mode 100644 tests/devices/test_sc_device.py delete mode 100644 tests/test/test_sc_test.py diff --git a/.github/workflows/python-app.yml b/.github/python-latest.yml similarity index 61% rename from .github/workflows/python-app.yml rename to .github/python-latest.yml index 3341cb5e..b4c183a4 100644 --- a/.github/workflows/python-app.yml +++ b/.github/python-latest.yml @@ -10,11 +10,11 @@ on: branches: [ "master" ] jobs: - build: + build-latest: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.8", "3.9"] + python-version: ["3.11"] steps: - uses: actions/checkout@v2 - name: Set up Python @@ -37,31 +37,4 @@ jobs: flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - name: Test with pytest run: | - pytest - - name: Build package - run: | - python setup.py sdist bdist_wheel - - name: Archive artifacts - uses: actions/upload-artifact@v1 - with: - name: scdata-pkg - path: dist - pypi-publish: - name: Upload release to PyPI - if: startsWith(github.ref, 'refs/tags/') - needs: - - build - runs-on: ubuntu-latest - environment: - name: pypi - url: https://pypi.org/p/scdata - permissions: - id-token: write - steps: - - name: Download all the dists - uses: actions/download-artifact@v3 - with: - name: python-package-distributions - path: dist/ - - name: Publish distribution 📦 to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 + pytest tests/all diff --git a/.github/workflows/python-multiple-versions.yml b/.github/workflows/python-multiple-versions.yml new file mode 100644 index 00000000..ff803c65 --- /dev/null +++ b/.github/workflows/python-multiple-versions.yml @@ -0,0 +1,43 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Python application + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +jobs: + build: + runs-on: "${{ matrix.os }}" + strategy: + fail-fast: true + matrix: + os: ["ubuntu-latest", "windows-latest", "macos-latest"] + python-version: ["3.9", "3.10", "3.11"] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install . + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest + run: | + pytest tests/cross-version + diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index bdaab28a..a04c2879 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -25,7 +25,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v3 with: - python-version: '3.x' + python-version: '3.11' - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..beeb1863 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,28 @@ +# TODO To be updated? +branca~=0.4.0 +# Flask, # TODO Add once finished with file reports +# TODO To be updated? +folium~=0.12.1 +# TODO To be updated? +geopy~=1.21.0 +# TODO To be updated? +Jinja2~=3.1.2 +# TODO To be updated? +matplotlib~=3.2.1 +# TODO To be updated? +missingno~=0.5.2 +numpy~=1.25.2 +pandas~=2.2.2 +pydantic +pytest +# TODO To be updated? 
+PyYAML==5.3.1 +requests +scipy +scikit-learn +seaborn +smartcitizen-connector +termcolor==1.1.0 +tqdm~=4.50.2 +timezonefinder~=6.1.9 +urllib3 \ No newline at end of file diff --git a/scdata/_config/config.py b/scdata/_config/config.py index bd63ca42..828c8b05 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -611,7 +611,7 @@ def get_paths(self): # Check for blueprints and calibrations # Find the path to the interim folder _dir = dirname(__file__) - _idir = join(_dir, 'interim') + _idir = join(_dir, '../tools/interim') # - Models and local tests _paths['models'] = join(_paths['data'], 'models') @@ -645,7 +645,7 @@ def get_paths(self): # Check for uploads _example_uploads = ['example_upload_1.json', 'example_zenodo_upload.yaml'] - _udir = join(_dir, 'uploads') + _udir = join(_dir, '../tools/uploads') for item in _example_uploads: s = join(_udir, item) d = join(_paths['uploads'], item) diff --git a/scdata/device/device.py b/scdata/device/device.py index 68ad4567..44a2e7f1 100644 --- a/scdata/device/device.py +++ b/scdata/device/device.py @@ -492,7 +492,7 @@ def update_postprocessing_date(self): to_timedelta(self.options.frequency), 'UTC') if self.handler.update_latest_postprocessing(latest_postprocessing): # Consider the case of no postprocessing, to avoid making the whole thing false - if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing or self.json.postprocessing is None: + if latest_postprocessing.to_pydatetime() == self.handler.latest_postprocessing or self.handler.json.postprocessing is None: self.postprocessing_updated = True else: self.postprocessing_updated = False diff --git a/scdata/device/process/alphasense.py b/scdata/device/process/alphasense.py index 5c41d7e4..2a2e60c7 100644 --- a/scdata/device/process/alphasense.py +++ b/scdata/device/process/alphasense.py @@ -4,7 +4,7 @@ from scdata._config import config from scdata.device.process.params import * from scdata.device.process import baseline_calc, clean_ts -from scipy.stats.stats import linregress +from scipy.stats import linregress import matplotlib.pyplot as plt from pandas import date_range, DataFrame, Series, isnull diff --git a/scdata/test/plot/__init__.py b/scdata/test/plot/__init__.py index a41d01f2..a9495e2a 100644 --- a/scdata/test/plot/__init__.py +++ b/scdata/test/plot/__init__.py @@ -1,11 +1,11 @@ from .ts_plot import ts_plot -from .ts_iplot import ts_iplot +# from .ts_iplot import ts_iplot from scdata._config import config if config._ipython_avail: from .ts_uplot import ts_uplot from .ts_dispersion_uplot import ts_dispersion_uplot from .scatter_plot import scatter_plot -from .scatter_iplot import scatter_iplot +# from .scatter_iplot import scatter_iplot from .ts_scatter import ts_scatter from .heatmap_plot import heatmap_plot from .heatmap_iplot import heatmap_iplot diff --git a/scdata/test/test.py b/scdata/test/test.py index a0c1ed91..a89f47a1 100644 --- a/scdata/test/test.py +++ b/scdata/test/test.py @@ -23,9 +23,10 @@ class Test(BaseModel): - from .plot import (ts_plot, ts_iplot, device_metric_map, path_plot, - scatter_plot, scatter_iplot, ts_scatter, - heatmap_plot, heatmap_iplot, + from .plot import (ts_plot, device_metric_map, path_plot, + scatter_plot, ts_scatter, + # ts_iplot, scatter_iplot, heatmap_iplot, + heatmap_plot, box_plot, ts_dendrogram, ts_dispersion_plot, ts_dispersion_grid, scatter_dispersion_grid) diff --git a/setup.py b/setup.py index a6e8292b..8b70a7c6 100755 --- a/setup.py +++ b/setup.py @@ -15,6 +15,8 @@ "Source Code": 
"https://github.com/fablabbcn/smartcitizen-data", } +REQUIREMENTS = [i.strip() for i in open("requirements.txt").readlines()] + setup( name='scdata', version='0.9.1', @@ -36,29 +38,7 @@ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Programming Language :: Python :: 3', ], - install_requires=[ - 'branca~=0.4.0', - 'Flask~=2.2.2', - 'folium~=0.12.1', - 'geopy~=1.21.0', - 'Jinja2~=3.1.2', - 'matplotlib~=3.2.1', - 'missingno~=0.5.2', - 'numpy~=1.20.0', - 'pandas~=1.5.0', - 'plotly~=4.14.3', - 'pytest', - 'PyYAML==5.3.1', - 'requests==2.23.0', - 'scipy~=1.5.0', - 'scikit-learn~=1.0.1', - 'seaborn~=0.11.2', - 'termcolor==1.1.0', - 'tqdm~=4.50.2', - 'timezonefinder~=6.1.9', - 'urllib3==1.25.9', - 'Werkzeug==2.2.2' - ], + install_requires=[REQUIREMENTS], setup_requires=['wheel'], python_requires=">=3.6", include_package_data=True, diff --git a/tests/all/devices/test_csv_device.py b/tests/all/devices/test_csv_device.py new file mode 100644 index 00000000..e6ede997 --- /dev/null +++ b/tests/all/devices/test_csv_device.py @@ -0,0 +1,40 @@ +import pytest +import scdata as sc +from scdata._config import config +import asyncio +import os + +# Set basic configs +config._log_level = 'DEBUG' +config.data['strict_load'] = False + +def test_csv_device(): + id = 16838 + path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..', 'scdata/tools/interim/example.csv') + frequency = '5Min' + + min_date = '2020-09-02T15:35:19Z' + blueprint = 'sc_air' + + d = sc.Device( + blueprint=blueprint, + source={'type':'csv', + 'handler': 'CSVHandler', + 'module': 'scdata.io.device_file'}, + params=sc.CSVParams(id=id, + path=path, + timezone='Europe/Madrid') + ) + + # Make device + d.options.frequency = frequency + d.options.min_date = min_date + load_status = asyncio.run(d.load()) + + m = d.data.index[0].tz_convert('UTC').strftime('%Y-%m-%dT%H:%M:%SZ') + process_status = d.process() + + assert d.blueprint == blueprint, resp.text + assert load_status == True, resp.text + assert process_status == True, resp.text + assert m == min_date diff --git a/tests/all/devices/test_sc_device.py b/tests/all/devices/test_sc_device.py new file mode 100644 index 00000000..0d57d362 --- /dev/null +++ b/tests/all/devices/test_sc_device.py @@ -0,0 +1,34 @@ +import pytest +import scdata as sc +from scdata._config import config +import asyncio + +# Set basic configs +config._log_level = 'DEBUG' +config.data['strict_load'] = False + +def test_api_device(): + id = 16838 + frequency = '1Min' + uuid = "80e684e5-359f-4755-aec9-30fc0c84415f" + min_date = '2022-09-10T00:00:00Z' + blueprint = 'sc_air' + + d = sc.Device(blueprint=blueprint, + params=sc.APIParams(id=id), + options=sc.DeviceOptions( + min_date=min_date, + frequency=frequency) + ) + + load_status = asyncio.run(d.load()) + + j = d.handler.json + m = d.data.index[0].tz_convert('UTC').strftime('%Y-%m-%dT%H:%M:%SZ') + process_status = d.process() + + assert d.blueprint == blueprint, resp.text + assert load_status == True, resp.text + assert process_status == True, resp.text + assert j.uuid == uuid, resp.text + assert m == min_date diff --git a/tests/all/test/test_sc_test.py b/tests/all/test/test_sc_test.py new file mode 100644 index 00000000..d2e49770 --- /dev/null +++ b/tests/all/test/test_sc_test.py @@ -0,0 +1,56 @@ +import pytest +import scdata as sc +from scdata.models import Metric +from scdata._config import config +import time +import asyncio +from scdata.tools.date import localise_date + +# Set basic configs +config._out_level = 'DEBUG' 
+config.data['strict_load'] = False + +def test_test(): + # Test couple of weirded format dates + min_date = '2023-09-20 08:19:10-0700' + now = time.localtime() + devices_list=[16838, 16989] + + # Make test + t = sc.Test(name=f'CHECK_{now.tm_year}-{now.tm_mon}-{now.tm_mday}', + devices=[sc.Device(blueprint='sc_air', + params=sc.APIParams(id=d), + options=sc.DeviceOptions(min_date=min_date)) for d in devices_list], + force_recreate=True) + + # Test requests + load_status = asyncio.run(t.load()) + + # Test processes + metric = Metric(name='NOISE_A_SMOOTH', + description='Basic smoothing calculation', + function='rolling_avg', + kwargs= {'name': ['NOISE_A'], 'window_size': 5} + ) + t.get_device(16838).add_metric(metric) + process_status = t.process() + metric_in_df = 'NOISE_A_SMOOTH' in t.get_device(16838).data.columns + + # Test plots + traces = { + "1": {"devices": 16838, + "channel": "NOISE_A", + "subplot": 1}, + } + figure_mpl = t.ts_plot(traces) + figure_uplot = t.ts_uplot(traces) + + # Test + + assert process_status == True + assert t.loaded == True + assert metric_in_df == True + for device in t.devices: + assert (localise_date(min_date, 'UTC') < device.data.index[0]), resp.text + assert figure is not None + assert figure_uplot is not None \ No newline at end of file diff --git a/tests/cross-version/devices/test/test_sc_test.py b/tests/cross-version/devices/test/test_sc_test.py new file mode 100644 index 00000000..93081f54 --- /dev/null +++ b/tests/cross-version/devices/test/test_sc_test.py @@ -0,0 +1,40 @@ +import pytest +import scdata as sc +from scdata.models import Metric +from scdata._config import config +import time +import asyncio +from scdata.tools.date import localise_date + +# Set basic configs +config._out_level = 'DEBUG' +config.data['strict_load'] = False + +def test_test(): + # Test couple of weirded format dates + min_date = '2023-09-20 08:19:10-0700' + now = time.localtime() + devices_list=[16838, 16989] + + # Make test + t = sc.Test(name=f'CHECK_{now.tm_year}-{now.tm_mon}-{now.tm_mday}', + devices=[sc.Device(blueprint='sc_air', + params=sc.APIParams(id=d), + options=sc.DeviceOptions(min_date=min_date)) for d in devices_list], + force_recreate=True) + + load_status = asyncio.run(t.load()) + metric = Metric(name='NOISE_A_SMOOTH', + description='Basic smoothing calculation', + function='rolling_avg', + kwargs= {'name': ['NOISE_A'], 'window_size': 5} + ) + t.get_device(16838).add_metric(metric) + process_status = t.process() + metric_in_df = 'NOISE_A_SMOOTH' in t.get_device(16838).data.columns + + assert process_status == True + assert t.loaded == True + assert metric_in_df == True + for device in t.devices: + assert (localise_date(min_date, 'UTC') < device.data.index[0]), resp.text \ No newline at end of file diff --git a/tests/cross-version/devices/test_csv_device.py b/tests/cross-version/devices/test_csv_device.py new file mode 100644 index 00000000..e6ede997 --- /dev/null +++ b/tests/cross-version/devices/test_csv_device.py @@ -0,0 +1,40 @@ +import pytest +import scdata as sc +from scdata._config import config +import asyncio +import os + +# Set basic configs +config._log_level = 'DEBUG' +config.data['strict_load'] = False + +def test_csv_device(): + id = 16838 + path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..', 'scdata/tools/interim/example.csv') + frequency = '5Min' + + min_date = '2020-09-02T15:35:19Z' + blueprint = 'sc_air' + + d = sc.Device( + blueprint=blueprint, + source={'type':'csv', + 'handler': 'CSVHandler', + 'module': 
'scdata.io.device_file'}, + params=sc.CSVParams(id=id, + path=path, + timezone='Europe/Madrid') + ) + + # Make device + d.options.frequency = frequency + d.options.min_date = min_date + load_status = asyncio.run(d.load()) + + m = d.data.index[0].tz_convert('UTC').strftime('%Y-%m-%dT%H:%M:%SZ') + process_status = d.process() + + assert d.blueprint == blueprint, resp.text + assert load_status == True, resp.text + assert process_status == True, resp.text + assert m == min_date diff --git a/tests/cross-version/devices/test_sc_device.py b/tests/cross-version/devices/test_sc_device.py new file mode 100644 index 00000000..0d57d362 --- /dev/null +++ b/tests/cross-version/devices/test_sc_device.py @@ -0,0 +1,34 @@ +import pytest +import scdata as sc +from scdata._config import config +import asyncio + +# Set basic configs +config._log_level = 'DEBUG' +config.data['strict_load'] = False + +def test_api_device(): + id = 16838 + frequency = '1Min' + uuid = "80e684e5-359f-4755-aec9-30fc0c84415f" + min_date = '2022-09-10T00:00:00Z' + blueprint = 'sc_air' + + d = sc.Device(blueprint=blueprint, + params=sc.APIParams(id=id), + options=sc.DeviceOptions( + min_date=min_date, + frequency=frequency) + ) + + load_status = asyncio.run(d.load()) + + j = d.handler.json + m = d.data.index[0].tz_convert('UTC').strftime('%Y-%m-%dT%H:%M:%SZ') + process_status = d.process() + + assert d.blueprint == blueprint, resp.text + assert load_status == True, resp.text + assert process_status == True, resp.text + assert j.uuid == uuid, resp.text + assert m == min_date diff --git a/tests/cross-version/test/test_sc_test.py b/tests/cross-version/test/test_sc_test.py new file mode 100644 index 00000000..93081f54 --- /dev/null +++ b/tests/cross-version/test/test_sc_test.py @@ -0,0 +1,40 @@ +import pytest +import scdata as sc +from scdata.models import Metric +from scdata._config import config +import time +import asyncio +from scdata.tools.date import localise_date + +# Set basic configs +config._out_level = 'DEBUG' +config.data['strict_load'] = False + +def test_test(): + # Test couple of weirded format dates + min_date = '2023-09-20 08:19:10-0700' + now = time.localtime() + devices_list=[16838, 16989] + + # Make test + t = sc.Test(name=f'CHECK_{now.tm_year}-{now.tm_mon}-{now.tm_mday}', + devices=[sc.Device(blueprint='sc_air', + params=sc.APIParams(id=d), + options=sc.DeviceOptions(min_date=min_date)) for d in devices_list], + force_recreate=True) + + load_status = asyncio.run(t.load()) + metric = Metric(name='NOISE_A_SMOOTH', + description='Basic smoothing calculation', + function='rolling_avg', + kwargs= {'name': ['NOISE_A'], 'window_size': 5} + ) + t.get_device(16838).add_metric(metric) + process_status = t.process() + metric_in_df = 'NOISE_A_SMOOTH' in t.get_device(16838).data.columns + + assert process_status == True + assert t.loaded == True + assert metric_in_df == True + for device in t.devices: + assert (localise_date(min_date, 'UTC') < device.data.index[0]), resp.text \ No newline at end of file diff --git a/tests/devices/test_sc_device.py b/tests/devices/test_sc_device.py deleted file mode 100644 index aa077167..00000000 --- a/tests/devices/test_sc_device.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -import scdata as sc -from scdata._config import config - -# Set basic configs -config._out_level = 'DEBUG' -config.data['strict_load'] = False - -def test_device(): - id = 16549 - uuid = "d030cb8a-2c2a-429e-9f04-416888708193" - min_date = '2023-07-29T09:00:06Z' - blueprint = 'sc_21_station_module' - - # Make device - 
device = sc.Device(descriptor = {'source': 'api', - 'id': id}) - - load_status = device.load(options={'min_date': min_date}) - j = device.api_device.devicejson - m = device.readings.index[0].tz_convert('UTC').strftime('%Y-%m-%dT%H:%M:%SZ') - process_status = device.process() - - assert device.blueprint == blueprint, resp.text - assert load_status == True, resp.text - assert process_status == True, resp.text - assert j['uuid'] == uuid, resp.text - assert m == min_date diff --git a/tests/test/test_sc_test.py b/tests/test/test_sc_test.py deleted file mode 100644 index 337616e6..00000000 --- a/tests/test/test_sc_test.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest -import scdata as sc -from scdata._config import config -import time - -# Set basic configs -config._out_level = 'DEBUG' -config.data['strict_load'] = False - -def test_test(): - # Test couple of weirded format dates - min_date = '2023-09-20 08:19:10-0700' - max_date = '2023-09-30 08:19:12' - - now = time.localtime() - # Make test - t = sc.Test(f'CHECK_{now.tm_year}-{now.tm_mon}-{now.tm_mday}') - t.add_devices_list(blueprint = 'sc_21_station_module', - devices_list = [16609, "15618"]) - - name = t.create() - t.load(options={'min_date': min_date, 'max_date': max_date}) - - load_status = t.loaded - process_status = t.process() - - assert load_status == True, resp.text - assert process_status == True, resp.text - for device in t.devices: - assert (sc.utils.localise_date(min_date, 'UTC') < t.devices[device].readings.index[0]), resp.text - assert (sc.utils.localise_date(max_date, 'UTC') > t.devices[device].readings.index[0]), resp.text From 6cd010f7e0bb0375aae9112f7ca90b27ff559f87 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Thu, 9 May 2024 13:40:07 +0200 Subject: [PATCH 62/72] Relax matplotlib requirement, add test for latest version --- .github/workflows/python-latest.yml | 40 +++++++++++++++++++++++++++++ requirements.txt | 3 +-- 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/python-latest.yml diff --git a/.github/workflows/python-latest.yml b/.github/workflows/python-latest.yml new file mode 100644 index 00000000..b4c183a4 --- /dev/null +++ b/.github/workflows/python-latest.yml @@ -0,0 +1,40 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Python application + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +jobs: + build-latest: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.11"] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + pip install . + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest + run: | + pytest tests/all diff --git a/requirements.txt b/requirements.txt index beeb1863..cdcdd08b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,8 +7,7 @@ folium~=0.12.1 geopy~=1.21.0 # TODO To be updated? Jinja2~=3.1.2 -# TODO To be updated? -matplotlib~=3.2.1 +matplotlib # TODO To be updated? missingno~=0.5.2 numpy~=1.25.2 From a4a8a450a5654a01d0f1d9c2fef9c367f0b6d582 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Thu, 9 May 2024 13:43:40 +0200 Subject: [PATCH 63/72] Fix iplot import and stats linregress --- .github/python-latest.yml | 40 ------------------------------- scdata/device/process/baseline.py | 2 +- scdata/device/process/formulae.py | 2 +- scdata/test/plot/__init__.py | 2 +- 4 files changed, 3 insertions(+), 43 deletions(-) delete mode 100644 .github/python-latest.yml diff --git a/.github/python-latest.yml b/.github/python-latest.yml deleted file mode 100644 index b4c183a4..00000000 --- a/.github/python-latest.yml +++ /dev/null @@ -1,40 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: Python application - -on: - push: - branches: [ "master" ] - pull_request: - branches: [ "master" ] - -jobs: - build-latest: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.11"] - steps: - - uses: actions/checkout@v2 - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - cache: 'pip' - - name: Display Python version - run: python -c "import sys; print(sys.version)" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 pytest - pip install . - - name: Lint with flake8 - run: | - # stop the build if there are Python syntax errors or undefined names - # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest - run: | - pytest tests/all diff --git a/scdata/device/process/baseline.py b/scdata/device/process/baseline.py index 0ca0d077..9d1efe68 100644 --- a/scdata/device/process/baseline.py +++ b/scdata/device/process/baseline.py @@ -1,5 +1,5 @@ from numpy import ones, transpose, log -from scipy.stats.stats import linregress +from scipy.stats import linregress from scipy.sparse import (diags, spdiags) from scipy.sparse.linalg import spsolve from pandas import date_range diff --git a/scdata/device/process/formulae.py b/scdata/device/process/formulae.py index 8ad8efe1..e2a5eb60 100644 --- a/scdata/device/process/formulae.py +++ b/scdata/device/process/formulae.py @@ -1,5 +1,5 @@ from numpy import exp, log, transpose -from scipy.stats.stats import linregress +from scipy.stats import linregress # TODO REVIEW def absolute_humidity(dataframe, **kwargs): diff --git a/scdata/test/plot/__init__.py b/scdata/test/plot/__init__.py index a9495e2a..e92b8115 100644 --- a/scdata/test/plot/__init__.py +++ b/scdata/test/plot/__init__.py @@ -8,7 +8,7 @@ # from .scatter_iplot import scatter_iplot from .ts_scatter import ts_scatter from .heatmap_plot import heatmap_plot -from .heatmap_iplot import heatmap_iplot +# from .heatmap_iplot import heatmap_iplot from .box_plot import box_plot from .ts_dendrogram import ts_dendrogram from .maps import device_metric_map, path_plot From 7ef82d00f9aa59e069ab04a890116bbae06c7a00 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Thu, 9 May 2024 13:53:32 +0200 Subject: [PATCH 64/72] Matplotlib plot styles fix and flask requirement --- requirements.txt | 3 ++- scdata/_config/config.py | 2 +- scdata/test/plot/scatter_dispersion_grid.py | 2 +- scdata/test/plot/ts_dispersion_grid.py | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index cdcdd08b..b954967b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,7 @@ # TODO To be updated? branca~=0.4.0 -# Flask, # TODO Add once finished with file reports +# TODO Add once finished with file reports +Flask~=2.2.2 # TODO To be updated? folium~=0.12.1 # TODO To be updated? 
diff --git a/scdata/_config/config.py b/scdata/_config/config.py index 828c8b05..d6b959a3 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -181,7 +181,7 @@ class Config(object): '#d1e5f0','#fddbc7','#f4a582','#d6604d', '#b2182b','#67001f']) - _plot_style = "seaborn-whitegrid" + _plot_style = "seaborn-v0_8-whitegrid" _ts_plot_def_fmt = { 'mpl': { diff --git a/scdata/test/plot/scatter_dispersion_grid.py b/scdata/test/plot/scatter_dispersion_grid.py index d5a6cf5e..230f59cd 100644 --- a/scdata/test/plot/scatter_dispersion_grid.py +++ b/scdata/test/plot/scatter_dispersion_grid.py @@ -5,7 +5,7 @@ from math import ceil from matplotlib import gridspec -plt.style.use('seaborn-white') +plt.style.use(config._plot_style) def scatter_dispersion_grid(self, **kwargs): ''' diff --git a/scdata/test/plot/ts_dispersion_grid.py b/scdata/test/plot/ts_dispersion_grid.py index b6ccfdf2..aab36de5 100644 --- a/scdata/test/plot/ts_dispersion_grid.py +++ b/scdata/test/plot/ts_dispersion_grid.py @@ -5,7 +5,7 @@ from math import ceil from matplotlib import gridspec -plt.style.use('seaborn-white') +plt.style.use(config._plot_style) def ts_dispersion_grid(self, **kwargs): ''' From 28000a4526d5e4019f87ce522d672f8a361e8b29 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Thu, 9 May 2024 14:26:31 +0200 Subject: [PATCH 65/72] Update pydantic version --- scdata/_config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scdata/_config/config.py b/scdata/_config/config.py index d6b959a3..5fe20d3c 100644 --- a/scdata/_config/config.py +++ b/scdata/_config/config.py @@ -534,7 +534,7 @@ def load_blueprints(self, urls): if rjson is None: continue if _nblueprint not in blueprints: - blueprints[_nblueprint] = TypeAdapter(Blueprint).validate_python(rjson).dict() + blueprints[_nblueprint] = TypeAdapter(Blueprint).validate_python(rjson).model_dump() return blueprints From a2806caadfa7d5ca8d65d2dd2aa19e40f11b8e84 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Thu, 9 May 2024 14:29:47 +0200 Subject: [PATCH 66/72] Fix test tests --- tests/all/test/test_sc_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/all/test/test_sc_test.py b/tests/all/test/test_sc_test.py index d2e49770..02445b0a 100644 --- a/tests/all/test/test_sc_test.py +++ b/tests/all/test/test_sc_test.py @@ -42,8 +42,8 @@ def test_test(): "channel": "NOISE_A", "subplot": 1}, } - figure_mpl = t.ts_plot(traces) - figure_uplot = t.ts_uplot(traces) + figure_mpl = t.ts_plot(traces=traces) + figure_uplot = t.ts_uplot(traces=traces) # Test @@ -52,5 +52,5 @@ def test_test(): assert metric_in_df == True for device in t.devices: assert (localise_date(min_date, 'UTC') < device.data.index[0]), resp.text - assert figure is not None + assert figure_mpl is not None assert figure_uplot is not None \ No newline at end of file From 82a9335bd04d28d6b7ab00a47942d2a54b11de14 Mon Sep 17 00:00:00 2001 From: oscgonfer Date: Thu, 9 May 2024 14:34:18 +0200 Subject: [PATCH 67/72] Comment uplot test --- tests/all/test/test_sc_test.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/all/test/test_sc_test.py b/tests/all/test/test_sc_test.py index 02445b0a..6c2ab3b6 100644 --- a/tests/all/test/test_sc_test.py +++ b/tests/all/test/test_sc_test.py @@ -43,7 +43,8 @@ def test_test(): "subplot": 1}, } figure_mpl = t.ts_plot(traces=traces) - figure_uplot = t.ts_uplot(traces=traces) + # TODO Improve this + # figure_uplot = t.ts_uplot(traces=traces) # Test @@ -53,4 +54,4 @@ def test_test(): for device in 
t.devices:
         assert (localise_date(min_date, 'UTC') < device.data.index[0]), resp.text
     assert figure_mpl is not None
-    assert figure_uplot is not None
\ No newline at end of file
+    # assert figure_uplot is not None
\ No newline at end of file

From c1e768e1442c03998c757f2802f074e80da9ff81 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Thu, 9 May 2024 14:37:09 +0200
Subject: [PATCH 68/72] Fix names for tests

---
 tests/all/devices/{test_csv_device.py => test_csv_device_all.py} | 0
 tests/all/devices/{test_sc_device.py => test_sc_device_all.py}   | 0
 tests/all/test/{test_sc_test.py => test_sc_test_all.py}          | 0
 3 files changed, 0 insertions(+), 0 deletions(-)
 rename tests/all/devices/{test_csv_device.py => test_csv_device_all.py} (100%)
 rename tests/all/devices/{test_sc_device.py => test_sc_device_all.py} (100%)
 rename tests/all/test/{test_sc_test.py => test_sc_test_all.py} (100%)

diff --git a/tests/all/devices/test_csv_device.py b/tests/all/devices/test_csv_device_all.py
similarity index 100%
rename from tests/all/devices/test_csv_device.py
rename to tests/all/devices/test_csv_device_all.py
diff --git a/tests/all/devices/test_sc_device.py b/tests/all/devices/test_sc_device_all.py
similarity index 100%
rename from tests/all/devices/test_sc_device.py
rename to tests/all/devices/test_sc_device_all.py
diff --git a/tests/all/test/test_sc_test.py b/tests/all/test/test_sc_test_all.py
similarity index 100%
rename from tests/all/test/test_sc_test.py
rename to tests/all/test/test_sc_test_all.py

From c5aeeb96a247498ec045de4e4420a43af1044d7c Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Thu, 9 May 2024 15:32:30 +0200
Subject: [PATCH 69/72] Fix test names

---
 tests/all/devices/test_csv_device_all.py      |  2 +-
 tests/all/devices/test_sc_device_all.py       |  2 +-
 tests/all/test/test_sc_test_all.py            |  2 +-
 .../devices/test/test_sc_test.py              | 40 -------------------
 tests/cross-version/devices/test_sc_device.py |  2 +-
 tests/cross-version/test/test_sc_test.py      |  2 +-
 6 files changed, 5 insertions(+), 45 deletions(-)
 delete mode 100644 tests/cross-version/devices/test/test_sc_test.py

diff --git a/tests/all/devices/test_csv_device_all.py b/tests/all/devices/test_csv_device_all.py
index e6ede997..7ca1a822 100644
--- a/tests/all/devices/test_csv_device_all.py
+++ b/tests/all/devices/test_csv_device_all.py
@@ -8,7 +8,7 @@
 config._log_level = 'DEBUG'
 config.data['strict_load'] = False

-def test_csv_device():
+def test_csv_device_all():
     id = 16838
     path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..', 'scdata/tools/interim/example.csv')
     frequency = '5Min'
diff --git a/tests/all/devices/test_sc_device_all.py b/tests/all/devices/test_sc_device_all.py
index 0d57d362..b405b2b7 100644
--- a/tests/all/devices/test_sc_device_all.py
+++ b/tests/all/devices/test_sc_device_all.py
@@ -7,7 +7,7 @@
 config._log_level = 'DEBUG'
 config.data['strict_load'] = False

-def test_api_device():
+def test_sc_device_all():
     id = 16838
     frequency = '1Min'
     uuid = "80e684e5-359f-4755-aec9-30fc0c84415f"
diff --git a/tests/all/test/test_sc_test_all.py b/tests/all/test/test_sc_test_all.py
index 6c2ab3b6..79fd8a22 100644
--- a/tests/all/test/test_sc_test_all.py
+++ b/tests/all/test/test_sc_test_all.py
@@ -10,7 +10,7 @@
 config._out_level = 'DEBUG'
 config.data['strict_load'] = False

-def test_test():
+def test_sc_test_all():
     # Test couple of weirded format dates
     min_date = '2023-09-20 08:19:10-0700'
     now = time.localtime()
diff --git a/tests/cross-version/devices/test/test_sc_test.py b/tests/cross-version/devices/test/test_sc_test.py
deleted file mode 100644
index 93081f54..00000000
--- a/tests/cross-version/devices/test/test_sc_test.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import pytest
-import scdata as sc
-from scdata.models import Metric
-from scdata._config import config
-import time
-import asyncio
-from scdata.tools.date import localise_date
-
-# Set basic configs
-config._out_level = 'DEBUG'
-config.data['strict_load'] = False
-
-def test_test():
-    # Test couple of weirded format dates
-    min_date = '2023-09-20 08:19:10-0700'
-    now = time.localtime()
-    devices_list=[16838, 16989]
-
-    # Make test
-    t = sc.Test(name=f'CHECK_{now.tm_year}-{now.tm_mon}-{now.tm_mday}',
-                devices=[sc.Device(blueprint='sc_air',
-                                   params=sc.APIParams(id=d),
-                                   options=sc.DeviceOptions(min_date=min_date)) for d in devices_list],
-                force_recreate=True)
-
-    load_status = asyncio.run(t.load())
-    metric = Metric(name='NOISE_A_SMOOTH',
-                    description='Basic smoothing calculation',
-                    function='rolling_avg',
-                    kwargs= {'name': ['NOISE_A'], 'window_size': 5}
-    )
-    t.get_device(16838).add_metric(metric)
-    process_status = t.process()
-    metric_in_df = 'NOISE_A_SMOOTH' in t.get_device(16838).data.columns
-
-    assert process_status == True
-    assert t.loaded == True
-    assert metric_in_df == True
-    for device in t.devices:
-        assert (localise_date(min_date, 'UTC') < device.data.index[0]), resp.text
\ No newline at end of file
diff --git a/tests/cross-version/devices/test_sc_device.py b/tests/cross-version/devices/test_sc_device.py
index 0d57d362..f9f265f4 100644
--- a/tests/cross-version/devices/test_sc_device.py
+++ b/tests/cross-version/devices/test_sc_device.py
@@ -7,7 +7,7 @@
 config._log_level = 'DEBUG'
 config.data['strict_load'] = False

-def test_api_device():
+def test_sc_device():
     id = 16838
     frequency = '1Min'
     uuid = "80e684e5-359f-4755-aec9-30fc0c84415f"
diff --git a/tests/cross-version/test/test_sc_test.py b/tests/cross-version/test/test_sc_test.py
index 93081f54..0c4ca548 100644
--- a/tests/cross-version/test/test_sc_test.py
+++ b/tests/cross-version/test/test_sc_test.py
@@ -10,7 +10,7 @@
 config._out_level = 'DEBUG'
 config.data['strict_load'] = False

-def test_test():
+def test_sc_test():
     # Test couple of weirded format dates
     min_date = '2023-09-20 08:19:10-0700'
     now = time.localtime()

From f01f1359727f6d245d1c901e591621f081418465 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Thu, 9 May 2024 15:38:53 +0200
Subject: [PATCH 70/72] Update examples

---
 examples/notebooks/01_getting_started.ipynb | 29 +++++++++++++++++--
 .../notebooks/03_data_visualisation.ipynb   |  2 +-
 examples/notebooks/04_processing_data.ipynb | 11 +++++--
 3 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/examples/notebooks/01_getting_started.ipynb b/examples/notebooks/01_getting_started.ipynb
index 38187a75..d6ad4521 100644
--- a/examples/notebooks/01_getting_started.ipynb
+++ b/examples/notebooks/01_getting_started.ipynb
@@ -119,7 +119,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "test = sc.Test(name='EXAMPLE',\n",
+    "import time\n",
+    "now = time.localtime()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "test = sc.Test(name=f'EXAMPLE_{now.tm_year}-{now.tm_mon}-{now.tm_mday}',\n",
     "               devices=[csv_device, api_device, api_device_blueprint],\n",
     "               force_recreate=True)\n",
    "test.options.cache=True"
@@ -300,7 +310,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "test.path"
+    "traces = {\n",
+    "    \"1\": {\"devices\": 16871,\n",
+    "          \"channel\": \"NOISE_A\",\n",
+    "          \"subplot\": 1},\n",
+    "    }\n",
+    "\n",
+    "figure = test.ts_plot(traces = traces)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "figure is None"
    ]
   },
   {
diff --git a/examples/notebooks/03_data_visualisation.ipynb b/examples/notebooks/03_data_visualisation.ipynb
index e6e4b29d..759253d3 100644
--- a/examples/notebooks/03_data_visualisation.ipynb
+++ b/examples/notebooks/03_data_visualisation.ipynb
@@ -337,7 +337,7 @@
     "    \"1\": {\"devices\": 16871,\n",
     "          \"channel\": \"TEMP\"},\n",
     "    \"2\": {\"devices\": 16871,\n",
-    "          \"channel\": \"TEMP\"} \n",
+    "          \"channel\": \"NOISE_A\"}\n",
     "    }\n",
     "\n",
     "options = {\n",
diff --git a/examples/notebooks/04_processing_data.ipynb b/examples/notebooks/04_processing_data.ipynb
index f365849c..90d0fc2c 100644
--- a/examples/notebooks/04_processing_data.ipynb
+++ b/examples/notebooks/04_processing_data.ipynb
@@ -290,7 +290,7 @@
    },
    "outputs": [],
    "source": [
-    "test.get_device(16871).data.loc[:,['TEMP', 'PRESS', 'TP_Poly']]"
+    "test.get_device(16871).data.loc[:,['TEMP', 'PRESS', 'TP_Poly', 'METRIC']]"
    ]
   },
   {
@@ -427,7 +427,7 @@
     "        'channel': 'NOISE_A',\n",
     "        'subplot': 1},\n",
     "    2: {'devices': 16871,\n",
-    "        'channel': 'NOISE_A_SMOOTH',\n",
+    "        'channel': 'NOISE_A_OUTLIERS',\n",
     "    3: {'devices': 16871,\n",
     "        'channel': 'NOISE_A_SMOOTH_10',\n",
     "        'subplot': 1},\n",
@@ -538,6 +538,13 @@
     "}\n",
     "test.ts_uplot(traces = traces, options = options)"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {

From c66753a43f5ea0d427ed107fd90303774dd3c0e1 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Thu, 9 May 2024 15:44:52 +0200
Subject: [PATCH 71/72] Remove makefile

---
 Makefile | 20 --------------------
 1 file changed, 20 deletions(-)
 delete mode 100644 Makefile

diff --git a/Makefile b/Makefile
deleted file mode 100644
index 1e95bb33..00000000
--- a/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-.PHONY: package release
-
-all: package
-
-test:
-	pytest
-
-clean:
-	rm -rf dist/*
-	rm -rf build/*
-
-package: clean
-	git add -p setup.py scdata/__init__.py
-	RELEASE=$(python setup.py --version) && git commit -m "Version $RELEASE" && git tag -a v$RELEASE -m "Version $RELEASE"
-	python setup.py sdist bdist_wheel
-
-release:
-	# Still testing
-	git push
-	git push --tags

From 15d672c40dfea5a0e5cf4a869f52d138d3d3b695 Mon Sep 17 00:00:00 2001
From: oscgonfer
Date: Thu, 9 May 2024 15:45:26 +0200
Subject: [PATCH 72/72] Bump to 1.0.0 version

---
 scdata/__init__.py | 2 +-
 setup.py           | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/scdata/__init__.py b/scdata/__init__.py
index a64c53db..1216c5b0 100644
--- a/scdata/__init__.py
+++ b/scdata/__init__.py
@@ -3,4 +3,4 @@
 from .test import Test
 from .models import Source, TestOptions, DeviceOptions, APIParams, FileParams, CSVParams

-__version__ = '0.9.1'
+__version__ = '1.0.0'
diff --git a/setup.py b/setup.py
index 8b70a7c6..e33ab074 100755
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@

 setup(
     name='scdata',
-    version='0.9.1',
+    version='1.0.0',
     description='Analysis of sensors and time series data',
     author='oscgonfer',
     license='GNU-GPL3.0',
@@ -40,7 +40,7 @@
     ],
     install_requires=[REQUIREMENTS],
     setup_requires=['wheel'],
-    python_requires=">=3.6",
+    python_requires=">=3.9",
     include_package_data=True,
     zip_safe=False
 )