From 6b2251fbcc7b2ea35de368fabba1407c3d676019 Mon Sep 17 00:00:00 2001
From: romainsacchi
Date: Sat, 29 Jul 2023 17:30:34 +0000
Subject: [PATCH] Black reformatting

---
 premise/cement.py                 | 14 +++----
 premise/direct_air_capture.py     |  5 ++-
 premise/ecoinvent_modification.py | 58 +++++++++++++++++++---------
 premise/electricity.py            | 33 ++++++++--------
 premise/emissions.py              | 12 +++---
 premise/export.py                 | 64 ++++++++++++++++++++++---------
 premise/fuels.py                  | 45 ++++++++++++----------
 premise/inventory_imports.py      |  2 +-
 premise/logger.py                 |  8 ++--
 premise/report.py                 |  6 +--
 premise/steel.py                  |  7 ++--
 premise/transport.py              |  3 +-
 12 files changed, 156 insertions(+), 101 deletions(-)

diff --git a/premise/cement.py b/premise/cement.py
index 10226b6a..3df8ae8e 100644
--- a/premise/cement.py
+++ b/premise/cement.py
@@ -14,9 +14,9 @@

 import yaml

+from .logger import create_logger
 from .transformation import BaseTransformation, Dict, IAMDataCollection, List, np, ws
 from .utils import DATA_DIR
-from .logger import create_logger

 logger = create_logger("cement")

@@ -464,9 +464,9 @@ def add_datasets_to_database(self) -> None:

         :return: Does not return anything. Modifies in place.
         """
-        #print("Start integration of cement data...")
+        # print("Start integration of cement data...")

-        #print("Create new clinker production datasets and delete old datasets")
+        # print("Create new clinker production datasets and delete old datasets")
         clinker_prod_datasets = list(self.build_clinker_production_datasets().values())
         self.database.extend(clinker_prod_datasets)
@@ -486,7 +486,7 @@ def add_datasets_to_database(self) -> None:
             )
         )

-        #print("Create new clinker market datasets and delete old datasets")
+        # print("Create new clinker market datasets and delete old datasets")
         clinker_market_datasets = list(
             self.fetch_proxies(
                 name="market for clinker",
@@ -512,7 +512,7 @@ def add_datasets_to_database(self) -> None:
             )
         )

-        #print("Create new cement market datasets")
+        # print("Create new cement market datasets")

         # cement markets
         markets = ws.get_many(
@@ -553,10 +553,10 @@ def add_datasets_to_database(self) -> None:

         self.database.extend(new_datasets)

-        #print(
+        # print(
         #    "Create new cement production datasets and "
         #    "adjust electricity consumption"
-        #)
+        # )

         # cement production
         production = ws.get_many(
             self.database,
diff --git a/premise/direct_air_capture.py b/premise/direct_air_capture.py
index e25e299c..011fc0a5 100644
--- a/premise/direct_air_capture.py
+++ b/premise/direct_air_capture.py
@@ -9,8 +9,8 @@
 import wurst
 import yaml

-from .utils import DATA_DIR
 from .logger import create_logger
+from .utils import DATA_DIR

 logger = create_logger("dac")

@@ -58,6 +58,7 @@ def _update_dac(scenario, version, system_model, modified_datasets):

     return scenario, modified_datasets

+
 class DirectAirCapture(BaseTransformation):
     """
     Class that modifies DAC and DACCS inventories and markets
@@ -105,7 +106,7 @@ def generate_dac_activities(self) -> None:
         modifies the original datasets to include the heat source,
         and adds the modified datasets to the database.
""" - #print("Generate region-specific direct air capture processes.") + # print("Generate region-specific direct air capture processes.") # get original dataset for ds_list in self.carbon_storage.values(): diff --git a/premise/ecoinvent_modification.py b/premise/ecoinvent_modification.py index 17d67ee8..428abdf9 100644 --- a/premise/ecoinvent_modification.py +++ b/premise/ecoinvent_modification.py @@ -6,12 +6,12 @@ import copy import multiprocessing -from multiprocessing.pool import ThreadPool as Pool -from multiprocessing import Pool as ProcessPool import os import pickle import sys from datetime import date +from multiprocessing import Pool as ProcessPool +from multiprocessing.pool import ThreadPool as Pool from pathlib import Path from typing import List, Union @@ -27,10 +27,10 @@ from .emissions import _update_emissions from .export import ( Export, + _prepare_database, build_datapackage, generate_scenario_factor_file, generate_superstructure_db, - _prepare_database, ) from .external import ExternalScenario from .external_data_validation import check_external_scenarios, check_inventories @@ -400,17 +400,41 @@ def check_time_horizon(time_horizon: int) -> int: return int(time_horizon) -def _update_all(scenario, version, system_model, modified_datasets, use_absolute_efficiency, vehicle_type, gains_scenario): - scenario, modified_datasets = _update_vehicles(scenario, vehicle_type, version, system_model, modified_datasets) - scenario, modified_datasets = _update_electricity(scenario, version, system_model, modified_datasets, use_absolute_efficiency) - scenario, modified_datasets = _update_dac(scenario, version, system_model, modified_datasets) - scenario, modified_datasets = _update_cement(scenario, version, system_model, modified_datasets) - scenario, modified_datasets = _update_steel(scenario, version, system_model, modified_datasets) - scenario, modified_datasets = _update_fuels(scenario, version, system_model, modified_datasets) - scenario, modified_datasets = _update_emissions(scenario, version, system_model, gains_scenario, modified_datasets) + +def _update_all( + scenario, + version, + system_model, + modified_datasets, + use_absolute_efficiency, + vehicle_type, + gains_scenario, +): + scenario, modified_datasets = _update_vehicles( + scenario, vehicle_type, version, system_model, modified_datasets + ) + scenario, modified_datasets = _update_electricity( + scenario, version, system_model, modified_datasets, use_absolute_efficiency + ) + scenario, modified_datasets = _update_dac( + scenario, version, system_model, modified_datasets + ) + scenario, modified_datasets = _update_cement( + scenario, version, system_model, modified_datasets + ) + scenario, modified_datasets = _update_steel( + scenario, version, system_model, modified_datasets + ) + scenario, modified_datasets = _update_fuels( + scenario, version, system_model, modified_datasets + ) + scenario, modified_datasets = _update_emissions( + scenario, version, system_model, gains_scenario, modified_datasets + ) return scenario, modified_datasets + class NewDatabase: """ Class that represents a new wurst inventory database, modified according to IAM data. 
@@ -504,7 +528,7 @@ def __init__(
             self.database = self.__find_cached_db(
                 source_db, keep_uncertainty_data=keep_uncertainty_data
             )
-            #print("Done!")
+            # print("Done!")
         else:
             self.database = self.__clean_database(
                 keep_uncertainty_data=keep_uncertainty_data
@@ -523,7 +547,7 @@ def __init__(

             data = self.__import_additional_inventories(self.additional_inventories)
             self.database.extend(data)
-            #print("Done!")
+            # print("Done!")

         print("\n/////////////////////// EXTRACTING IAM DATA ////////////////////////")

@@ -551,7 +575,7 @@ def _fetch_iam_data(scenario):
         with Pool(processes=multiprocessing.cpu_count()) as pool:
             pool.map(_fetch_iam_data, self.scenarios)

-        #print("Done!")
+        # print("Done!")

     def __find_cached_db(self, db_name: str, keep_uncertainty_data: bool) -> List[dict]:
         """
@@ -710,7 +734,7 @@ def __import_inventories(self, keep_uncertainty_data: bool = False) -> List[dict

         self.database.extend(datasets)

-        #print("Done!\n")
+        # print("Done!\n")
         return data

     def __import_additional_inventories(
@@ -809,7 +833,6 @@ def update_dac(self) -> None:
             self.scenarios[s] = results[s][0]
             self.modified_datasets.update(results[s][1])

-
     def update_fuels(self) -> None:
         """
         This method will update the fuels inventories
@@ -1362,8 +1385,7 @@ def write_datapackage(self, name: str = f"datapackage_{date.today()}"):
         cached_inventories.extend(extra_inventories)

         list_scenarios = ["original"] + [
-            f"{s['model']} - {s['pathway']} - {s['year']}"
-            for s in self.scenarios
+            f"{s['model']} - {s['pathway']} - {s['year']}" for s in self.scenarios
         ]

         build_datapackage(
diff --git a/premise/electricity.py b/premise/electricity.py
index aa1f5dd2..120e2de0 100644
--- a/premise/electricity.py
+++ b/premise/electricity.py
@@ -20,6 +20,7 @@
 from . import VARIABLES_DIR
 from .data_collection import get_delimiter
 from .export import biosphere_flows_dictionary
+from .logger import create_logger
 from .transformation import (
     BaseTransformation,
     Dict,
@@ -34,7 +35,6 @@
     ws,
 )
 from .utils import DATA_DIR, eidb_label, get_efficiency_solar_photovoltaics
-from .logger import create_logger

 LOSS_PER_COUNTRY = DATA_DIR / "electricity" / "losses_per_country.csv"
 IAM_BIOMASS_VARS = VARIABLES_DIR / "biomass_variables.yaml"
@@ -162,7 +162,9 @@ def get_production_weighted_losses(

     return {"high": high, "medium": medium, "low": low}

-def _update_electricity(scenario, version, system_model, modified_datasets, use_absolute_efficiency):
+def _update_electricity(
+    scenario, version, system_model, modified_datasets, use_absolute_efficiency
+):
     electricity = Electricity(
         database=scenario["database"],
         iam_data=scenario["iam data"],
@@ -204,6 +206,7 @@ def _update_electricity(scenario, version, system_model, modified_datasets, use_

     return scenario, modified_datasets

+
 class Electricity(BaseTransformation):
     """
     Class that modifies electricity markets in the database based on IAM output data.
@@ -1224,7 +1227,7 @@ def update_efficiency_of_solar_pv(self) -> None:
         :return:
         """

-        #print("Update efficiency of solar PV panels.")
+        # print("Update efficiency of solar PV panels.")

         # TODO: check if IAM data provides efficiencies for PV panels and use them instead

@@ -1313,7 +1316,7 @@ def update_ng_production_ds(self) -> None:
         to high pressure natural gas markets.
""" - #print("Update natural gas extraction datasets.") + # print("Update natural gas extraction datasets.") countries = ["NL", "DE", "FR", "RER", "IT", "CH"] @@ -1397,7 +1400,7 @@ def update_ng_production_ds(self) -> None: ) def create_biomass_markets(self) -> None: - #print("Create biomass markets.") + # print("Create biomass markets.") with open(IAM_BIOMASS_VARS, "r", encoding="utf-8") as stream: biomass_map = yaml.safe_load(stream) @@ -1585,7 +1588,7 @@ def create_biomass_markets(self) -> None: ) # replace biomass inputs - #print("Replace biomass inputs.") + # print("Replace biomass inputs.") for dataset in ws.get_many( self.database, ws.either( @@ -1620,7 +1623,7 @@ def create_region_specific_power_plants(self): """ - #print("Create region-specific power plants.") + # print("Create region-specific power plants.") all_plants = [] techs = [ @@ -1742,7 +1745,7 @@ def update_electricity_efficiency(self) -> None: :rtype: list """ - #print("Adjust efficiency of power plants...") + # print("Adjust efficiency of power plants...") mapping = InventorySet(self.database) self.fuel_map = mapping.generate_fuel_map() @@ -1764,7 +1767,7 @@ def update_electricity_efficiency(self) -> None: for technology in technologies_map: dict_technology = technologies_map[technology] - #print("Rescale inventories and emissions for", technology) + # print("Rescale inventories and emissions for", technology) for dataset in ws.get_many( self.database, @@ -1868,7 +1871,7 @@ def adjust_coal_power_plant_emissions(self) -> None: including coal-fired CHPs. """ - #print("Adjust efficiency and emissions of coal power plants...") + # print("Adjust efficiency and emissions of coal power plants...") coal_techs = [ "Coal PC", @@ -2041,7 +2044,7 @@ def update_electricity_markets(self) -> None: ] # We first need to empty 'market for electricity' and 'market group for electricity' datasets - #print("Empty old electricity datasets") + # print("Empty old electricity datasets") datasets_to_empty = ws.get_many( self.database, @@ -2108,14 +2111,14 @@ def update_electricity_markets(self) -> None: ) # We then need to create high voltage IAM electricity markets - #print("Create high voltage markets.") + # print("Create high voltage markets.") self.create_new_markets_high_voltage() - #print("Create medium voltage markets.") + # print("Create medium voltage markets.") self.create_new_markets_medium_voltage() - #print("Create low voltage markets.") + # print("Create low voltage markets.") self.create_new_markets_low_voltage() - #print("Done!") + # print("Done!") def write_log(self, dataset, status="created"): """ diff --git a/premise/emissions.py b/premise/emissions.py index df275862..9d022760 100644 --- a/premise/emissions.py +++ b/premise/emissions.py @@ -14,6 +14,7 @@ import yaml from numpy import ndarray +from .logger import create_logger from .transformation import ( BaseTransformation, Dict, @@ -24,7 +25,6 @@ ws, ) from .utils import DATA_DIR -from .logger import create_logger logger = create_logger("emissions") @@ -32,7 +32,6 @@ GAINS_SECTORS = DATA_DIR / "GAINS_emission_factors" / "GAINS_EU_sectors_mapping.yaml" - def fetch_mapping(filepath: str) -> dict: """Returns a dictionary from a YML file""" @@ -41,7 +40,9 @@ def fetch_mapping(filepath: str) -> dict: return mapping -def _update_emissions(scenario, version, system_model, gains_scenario, modified_datasets): +def _update_emissions( + scenario, version, system_model, gains_scenario, modified_datasets +): emissions = Emissions( database=scenario["database"], year=scenario["year"], @@ 
-59,6 +60,7 @@ def _update_emissions(scenario, version, system_model, gains_scenario, modified_ return scenario, modified_datasets + class Emissions(BaseTransformation): """ Class that modifies emissions of hot pollutants @@ -128,7 +130,7 @@ def prepare_data(self, data): return data def update_emissions_in_database(self): - #print("Integrating GAINS EU emission factors.") + # print("Integrating GAINS EU emission factors.") for ds in self.database: if ( ds["name"] in self.rev_gains_map_EU @@ -143,7 +145,7 @@ def update_emissions_in_database(self): ) self.write_log(ds, status="updated") - #print("Integrating GAINS IAM emission factors.") + # print("Integrating GAINS IAM emission factors.") for ds in self.database: if ( ds["name"] in self.rev_gains_map_IAM diff --git a/premise/export.py b/premise/export.py index 04dd048a..3784e673 100644 --- a/premise/export.py +++ b/premise/export.py @@ -5,13 +5,15 @@ import csv import datetime import json +import multiprocessing as mp import os import re import uuid from collections import defaultdict from functools import lru_cache +from multiprocessing.pool import ThreadPool as Pool from pathlib import Path -from typing import Dict, List, Tuple, Set, Any, Union +from typing import Any, Dict, List, Set, Tuple, Union import numpy as np import pandas as pd @@ -20,8 +22,6 @@ from datapackage import Package from pandas import DataFrame from scipy import sparse as nsp -import multiprocessing as mp -from multiprocessing.pool import ThreadPool as Pool from . import DATA_DIR, __version__ from .data_collection import get_delimiter @@ -628,6 +628,7 @@ def generate_scenario_factor_file(origin_db, scenarios, db_name, version): return df, extra_acts + def generate_new_activities(args): k, v, acts_ind, db_name, version, dict_meta, m = args act = get_act_dict_structure(k, acts_ind, db_name) @@ -639,6 +640,7 @@ def generate_new_activities(args): ) return act + def generate_scenario_difference_file( db_name, origin_db, scenarios, version ) -> tuple[DataFrame, list[dict], set[Any]]: @@ -679,9 +681,20 @@ def generate_scenario_difference_file( for db in list_dbs: for a in db: key = (a["name"], a["reference product"], None, a["location"], a["unit"]) - dict_meta[key] = {b: c for b, c in a.items() if - b not in ["exchanges", "code", "name", "reference product", "location", "unit", - "database"]} + dict_meta[key] = { + b: c + for b, c in a.items() + if b + not in [ + "exchanges", + "code", + "name", + "reference product", + "location", + "unit", + "database", + ] + } for i, db in enumerate(list_dbs): for ds in db: @@ -715,7 +728,13 @@ def generate_scenario_difference_file( inds_d[ind[0]].append(ind[1]) with Pool(processes=mp.cpu_count()) as pool: - new_db = pool.map(generate_new_activities, [(k, v, acts_ind, db_name, version, dict_meta, m) for k, v in inds_d.items()]) + new_db = pool.map( + generate_new_activities, + [ + (k, v, acts_ind, db_name, version, dict_meta, m) + for k, v in inds_d.items() + ], + ) inds_std = sparse.argwhere((m[..., 1:] == m[..., 0, None]).all(axis=-1).T == False) @@ -802,8 +821,12 @@ def generate_scenario_difference_file( df["to categories"] = None df = df.replace({"None": None, np.nan: None}) - df.loc[df["flow type"] == "biosphere", ["from reference product", "from location"]] = None - df.loc[df["flow type"].isin(["technosphere", "production"]), "from categories"] = None + df.loc[ + df["flow type"] == "biosphere", ["from reference product", "from location"] + ] = None + df.loc[ + df["flow type"].isin(["technosphere", "production"]), "from categories" + 
+    ] = None
     df.loc[df["flow type"] == "production", list_scenarios] = 1.0

     # return the dataframe and the new db
@@ -899,23 +922,20 @@ def prepare_db_for_export(
     )

     # we ensure the absence of duplicate datasets
-    #print("- check for duplicates...")
+    # print("- check for duplicates...")
     base.database = check_for_duplicates(base.database)

     # we check the format of numbers
-    #print("- check for values format...")
+    # print("- check for values format...")
     base.database = check_database_name(data=base.database, name=name)
     base.database = remove_unused_fields(base.database)
     base.database = correct_fields_format(base.database)
     base.database = check_amount_format(base.database)

     # we relink "dead" exchanges
-    #print("- relinking exchanges...")
+    # print("- relinking exchanges...")
     base.relink_datasets(
-        excludes_datasets=[
-            "cobalt industry",
-            "market group for electricity"
-        ],
+        excludes_datasets=["cobalt industry", "market group for electricity"],
         alt_names=[
             "market group for electricity, high voltage",
             "market group for electricity, medium voltage",
@@ -926,12 +946,14 @@ def prepare_db_for_export(
         ],
     )

-    #print("Done!")
+    # print("Done!")

     return base.database, base.cache

-def _prepare_database(scenario, scenario_cache, version, system_model, modified_datasets):
+def _prepare_database(
+    scenario, scenario_cache, version, system_model, modified_datasets
+):
     scenario["database"], scenario_cache = prepare_db_for_export(
         scenario,
         cache=scenario_cache,
@@ -943,6 +965,7 @@ def _prepare_database(scenario, scenario_cache, version, system_model, modified_

     return scenario, scenario_cache

+
 class Export:
     """
     Class that exports the transformed data into matrices:
@@ -964,7 +987,10 @@ class Export:
     """

     def __init__(
-        self, scenario: dict = None, filepath: Union[list[Path], list[Union[Path, Any]]] = None, version: str = None
+        self,
+        scenario: dict = None,
+        filepath: Union[list[Path], list[Union[Path, Any]]] = None,
+        version: str = None,
     ):
         self.db = scenario["database"]
         self.model = scenario["model"]
diff --git a/premise/fuels.py b/premise/fuels.py
index fcaf32ce..e3fc1c4b 100644
--- a/premise/fuels.py
+++ b/premise/fuels.py
@@ -4,9 +4,9 @@

 import copy
 import logging.config
+from functools import lru_cache
 from pathlib import Path
 from typing import Union
-from functools import lru_cache

 import wurst
 import xarray as xr
@@ -15,6 +15,7 @@
 from . import VARIABLES_DIR
 from .inventory_imports import get_biosphere_code
+from .logger import create_logger
 from .transformation import (
     Any,
     BaseTransformation,
@@ -30,7 +31,6 @@
     ws,
 )
 from .utils import DATA_DIR, get_crops_properties
-from .logger import create_logger

 logger = create_logger("fuel")

@@ -180,7 +180,7 @@ def update_co2_emissions(
         exc for exc in dataset["exchanges"] if exc["name"] == "Carbon dioxide, fossil"
     ):
         pass
-        #print(f"{dataset['name']} has no fossil CO2 output.")
+        # print(f"{dataset['name']} has no fossil CO2 output.")

     if "log parameters" not in dataset:
         dataset["log parameters"] = {}
@@ -287,13 +287,13 @@ def _update_fuels(scenario, version, system_model, modified_datasets):
     )

     if any(
-            x is not None
-            for x in (
-                scenario["iam data"].petrol_markets,
-                scenario["iam data"].diesel_markets,
-                scenario["iam data"].gas_markets,
-                scenario["iam data"].hydrogen_markets,
-            )
+        x is not None
+        for x in (
+            scenario["iam data"].petrol_markets,
+            scenario["iam data"].diesel_markets,
+            scenario["iam data"].gas_markets,
+            scenario["iam data"].hydrogen_markets,
+        )
     ):
         fuels.generate_fuel_markets()
         scenario["database"] = fuels.database
@@ -303,6 +303,7 @@ def _update_fuels(scenario, version, system_model, modified_datasets):

     return scenario, modified_datasets

+
 class Fuels(BaseTransformation):
     """
     Class that modifies fuel inventories and markets in ecoinvent based on IAM output data.
@@ -626,7 +627,7 @@ def generate_hydrogen_activities(self) -> None:

         self.database.extend(new_ds.values())

-        #print("Generate region-specific hydrogen supply chains.")
+        # print("Generate region-specific hydrogen supply chains.")

         # loss coefficients for hydrogen supply
         losses = fetch_mapping(HYDROGEN_SUPPLY_LOSSES)
@@ -2013,19 +2014,19 @@ def generate_fuel_supply_chains(self):
         """Duplicate fuel chains and make them IAM region-specific"""

         # hydrogen
-        #print("Generate region-specific hydrogen production pathways.")
+        # print("Generate region-specific hydrogen production pathways.")
         self.generate_hydrogen_activities()

         # biogas
-        #print("Generate region-specific biogas and syngas supply chains.")
+        # print("Generate region-specific biogas and syngas supply chains.")
         self.generate_biogas_activities()

         # synthetic fuels
-        #print("Generate region-specific synthetic fuel supply chains.")
+        # print("Generate region-specific synthetic fuel supply chains.")
         self.generate_synthetic_fuel_activities()

         # biofuels
-        #print("Generate region-specific biofuel supply chains.")
+        # print("Generate region-specific biofuel supply chains.")
         self.generate_biofuel_activities()

     def generate_world_fuel_market(
@@ -2311,7 +2312,7 @@ def generate_fuel_markets(self):
         # Create new fuel supply chains
         self.generate_fuel_supply_chains()

-        #print("Generate new fuel markets.")
+        # print("Generate new fuel markets.")

         # we start by creating region-specific "diesel, burned in" markets
         new_datasets = []
@@ -2405,7 +2406,7 @@ def generate_fuel_markets(self):
                 for i in vars_map[fuel]
                 if i in e
             ]:
-                #print(f"--> {fuel}")
+                # print(f"--> {fuel}")

                 prod_vars = [
                     v
@@ -2539,9 +2540,7 @@ def generate_fuel_markets(self):

         for ds in ws.get_many(
             self.database,
-            ws.exclude(
-                ws.either(*[ws.equals("name", i) for i in datasets_to_empty])
-            ),
+            ws.exclude(ws.either(*[ws.equals("name", i) for i in datasets_to_empty])),
         ):
             for exc in ws.technosphere(
                 ds,
@@ -2550,7 +2549,11 @@ def generate_fuel_markets(self):
                 new_supplier = datasets_to_empty[exc["name"]]
                 exc["name"] = new_supplier[0]
                 exc["product"] = new_supplier[1]
-                exc["location"] = ds["location"] if ds["location"] in self.regions else self.ecoinvent_to_iam_loc[ds["location"]]
+                exc["location"] = (
+                    ds["location"]
+                    if ds["location"] in self.regions
+                    else self.ecoinvent_to_iam_loc[ds["location"]]
+                )

         self.relink_activities_to_new_markets()
         print("Done!")
diff --git a/premise/inventory_imports.py b/premise/inventory_imports.py
index fb131513..83a18253 100644
--- a/premise/inventory_imports.py
+++ b/premise/inventory_imports.py
@@ -787,7 +787,7 @@ def prepare_inventory(self):

     def merge_inventory(self):
         self.database.extend(self.import_db.data)
-        #print("Done!")
+        # print("Done!")

         return self.database
diff --git a/premise/logger.py b/premise/logger.py
index 6dc4dc0b..f2aa3beb 100644
--- a/premise/logger.py
+++ b/premise/logger.py
@@ -1,7 +1,10 @@
-from . import DATA_DIR
+import logging.config
+import multiprocessing
 from pathlib import Path
+
 import yaml
-import multiprocessing, logging.config
+
+from . import DATA_DIR

 LOG_CONFIG = DATA_DIR / "utils" / "logging" / "logconfig.yaml"
 DIR_LOG_REPORT = Path.cwd() / "export" / "logs"
@@ -20,5 +23,4 @@ def create_logger(handler):

     logger = logging.getLogger(handler)

-
     return logger
diff --git a/premise/report.py b/premise/report.py
index 0885eee1..dced5b6b 100644
--- a/premise/report.py
+++ b/premise/report.py
@@ -5,7 +5,6 @@
 import os
 from datetime import datetime
 from pathlib import Path
-from pandas._libs.parsers import ParserError

 import openpyxl
 import pandas as pd
@@ -16,6 +15,7 @@
 from openpyxl.utils import get_column_letter
 from openpyxl.utils.dataframe import dataframe_to_rows
 from openpyxl.worksheet.dimensions import ColumnDimension, DimensionHolder
+from pandas._libs.parsers import ParserError
 from pandas.errors import EmptyDataError

 from . import DATA_DIR, VARIABLES_DIR, __version__
@@ -549,7 +549,3 @@ def convert_log_to_excel_file(filepath):
     except EmptyDataError:
         # return an empty dataframe
         return pd.DataFrame(columns=fetch_columns(filepath))
-
-
-
-
diff --git a/premise/steel.py b/premise/steel.py
index 95f24e05..ae8f94ce 100644
--- a/premise/steel.py
+++ b/premise/steel.py
@@ -9,9 +9,9 @@
 import yaml

 from .data_collection import IAMDataCollection
+from .logger import create_logger
 from .transformation import BaseTransformation, ws
 from .utils import DATA_DIR
-from .logger import create_logger

 logger = create_logger("steel")

@@ -37,6 +37,7 @@ def _update_steel(scenario, version, system_model, modified_datasets):

     return scenario, modified_datasets

+
 class Steel(BaseTransformation):
     """
     Class that modifies steel markets in ecoinvent based on IAM output data.
@@ -269,7 +270,7 @@ def create_steel_production_activities(self):
         """

         # Determine all steel activities in the database. Empty old datasets.
-        #print("Create new steel production datasets and empty old datasets")
+        # print("Create new steel production datasets and empty old datasets")

         d_act_primary_steel = {
             mat: self.fetch_proxies(
@@ -350,7 +351,7 @@ def create_pig_iron_production_activities(self):

        Create region-specific pig iron production activities.
""" - #print("Create pig iron production datasets") + # print("Create pig iron production datasets") pig_iron = self.fetch_proxies( name="pig iron production", diff --git a/premise/transport.py b/premise/transport.py index b72751fc..45565da0 100644 --- a/premise/transport.py +++ b/premise/transport.py @@ -34,7 +34,6 @@ def _update_vehicles(scenario, vehicle_type, version, system_model, modified_datasets): - trspt = Transport( database=scenario["database"], year=scenario["year"], @@ -138,7 +137,7 @@ def create_fleet_vehicles( :param regions: IAM regions :return: list of fleet average vehicle datasets """ - #print("Create fleet average vehicles...") + # print("Create fleet average vehicles...") vehicles_map = get_vehicles_mapping()