diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a12d3e2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +/.vscode +/visualized_data/* +/libcity/tmp/* +/libcity/__pycache__ +/libcity/*/__pycache__ +/libcity/data/dataset/__pycache__ +/.idea +/libcity/log/* +/libcity/model/*/__pycache__ +/libcity/data/dataset/*/__pycache__ +/libcity/cache/model_cache/* +/libcity/cache/evaluate_cache/* diff --git a/CHIBike.json b/CHIBike.json new file mode 100644 index 0000000..1d46456 --- /dev/null +++ b/CHIBike.json @@ -0,0 +1,26 @@ +{ + "dataset_class": "PDFormerGridDataset", + "input_window": 6, + "output_window": 1, + "train_rate": 0.7, + "eval_rate": 0.1, + "batch_size": 16, + "add_time_in_day": true, + "add_day_in_week": true, + "use_row_column": false, + "lr_warmup_epoch": 15, + "max_epoch": 200, + "embed_dim": 32, + "far_mask_delta": 3, + "geo_num_heads": 2, + "sem_num_heads": 2, + "t_num_heads": 4, + "cluster_method": "kshape", + "cand_key_days": 14, + "seed": 1, + "type_ln": "pre", + "set_loss": "huber", + "huber_delta": 2, + "mask_val": 5, + "mode": "average" +} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..54f7a74 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 aptx1231 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/NYCTaxi.json b/NYCTaxi.json new file mode 100644 index 0000000..d1c2c8b --- /dev/null +++ b/NYCTaxi.json @@ -0,0 +1,24 @@ +{ + "dataset_class": "PDFormerGridDataset", + "input_window": 6, + "output_window": 1, + "train_rate": 0.7, + "eval_rate": 0.1, + "batch_size": 16, + "add_time_in_day": true, + "add_day_in_week": true, + "use_row_column": false, + "max_epoch": 200, + "far_mask_delta": 3, + "geo_num_heads": 2, + "sem_num_heads": 2, + "t_num_heads": 4, + "cluster_method": "kshape", + "cand_key_days": 14, + "seed": 2, + "type_ln": "pre", + "set_loss": "huber", + "huber_delta": 2, + "mask_val": 10, + "mode": "average" +} \ No newline at end of file diff --git a/PeMS04.json b/PeMS04.json new file mode 100644 index 0000000..93ddf72 --- /dev/null +++ b/PeMS04.json @@ -0,0 +1,24 @@ +{ + "dataset_class": "PDFormerDataset", + "input_window": 12, + "output_window": 12, + "train_rate": 0.6, + "eval_rate": 0.2, + "batch_size": 16, + "add_time_in_day": true, + "add_day_in_week": true, + "step_size": 1274, + "max_epoch": 200, + "bidir": true, + "far_mask_delta": 7, + "geo_num_heads": 4, + "sem_num_heads": 2, + "t_num_heads": 2, + "cluster_method": "kshape", + "cand_key_days": 14, + "seed": 1, + "type_ln": "pre", + "set_loss": "huber", + "huber_delta": 2, + "mode": "average" +} \ No newline at end of file diff --git a/PeMS07.json b/PeMS07.json new file mode 100644 index 0000000..5ab78dc --- /dev/null +++ b/PeMS07.json @@ -0,0 +1,25 @@ +{ + "dataset_class": "PDFormerDataset", + "input_window": 12, + "output_window": 12, + "train_rate": 0.6, + "eval_rate": 0.2, + "batch_size": 8, + "grad_accmu_steps": 2, + "add_time_in_day": true, + "add_day_in_week": true, + "step_size": 4232, + "max_epoch": 200, + "bidir": true, + "far_mask_delta": 7, + "geo_num_heads": 4, + "sem_num_heads": 2, + "t_num_heads": 2, + "cluster_method": "kshape", + "cand_key_days": 14, + "seed": 1, + "type_ln": "pre", + "set_loss": "huber", + "huber_delta": 2, + "mode": "average" +} \ No newline at end of file diff --git a/PeMS08.json b/PeMS08.json new file mode 100644 index 0000000..4f191b7 --- /dev/null +++ b/PeMS08.json @@ -0,0 +1,24 @@ +{ + "dataset_class": "PDFormerDataset", + "input_window": 12, + "output_window": 12, + "train_rate": 0.6, + "eval_rate": 0.2, + "batch_size": 16, + "add_time_in_day": true, + "add_day_in_week": true, + "step_size": 2776, + "max_epoch": 200, + "bidir": true, + "far_mask_delta": 7, + "geo_num_heads": 4, + "sem_num_heads": 2, + "t_num_heads": 2, + "cluster_method": "kshape", + "cand_key_days": 21, + "seed": 1, + "type_ln": "pre", + "set_loss": "huber", + "huber_delta": 2, + "mode": "average" +} \ No newline at end of file diff --git a/PeMS08.npy b/PeMS08.npy new file mode 100644 index 0000000..9578ad5 Binary files /dev/null and b/PeMS08.npy differ diff --git a/T-Drive.json b/T-Drive.json new file mode 100644 index 0000000..f49d16d --- /dev/null +++ b/T-Drive.json @@ -0,0 +1,25 @@ +{ + "dataset_class": "PDFormerGridDataset", + "input_window": 6, + "output_window": 1, + "train_rate": 0.7, + "eval_rate": 0.1, + "batch_size": 16, + "add_time_in_day": true, + "add_day_in_week": true, + "use_row_column": false, + "far_mask_delta": 3, + "geo_num_heads": 2, + "sem_num_heads": 2, + "t_num_heads": 4, + "cluster_method": "kshape", + "cand_key_days": 14, + "seed": 42, + "max_epoch": 200, + "type_ln": "pre", + "drop_path": 0, + "set_loss": "huber", + "huber_delta": 2, + "mask_val": 10, + "mode": "average" +} \ No newline at end of file diff --git a/framework.png b/framework.png new file mode 100644 index 
0000000..6affed7 Binary files /dev/null and b/framework.png differ diff --git a/libcity/__init__.py b/libcity/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libcity/cache/dataset_cache/dtw_CHIBike.npy b/libcity/cache/dataset_cache/dtw_CHIBike.npy new file mode 100644 index 0000000..69d0a99 Binary files /dev/null and b/libcity/cache/dataset_cache/dtw_CHIBike.npy differ diff --git a/libcity/cache/dataset_cache/dtw_NYCTaxi.npy b/libcity/cache/dataset_cache/dtw_NYCTaxi.npy new file mode 100644 index 0000000..0760e90 Binary files /dev/null and b/libcity/cache/dataset_cache/dtw_NYCTaxi.npy differ diff --git a/libcity/cache/dataset_cache/dtw_PeMS04.npy b/libcity/cache/dataset_cache/dtw_PeMS04.npy new file mode 100644 index 0000000..0949782 Binary files /dev/null and b/libcity/cache/dataset_cache/dtw_PeMS04.npy differ diff --git a/libcity/cache/dataset_cache/dtw_PeMS07.npy b/libcity/cache/dataset_cache/dtw_PeMS07.npy new file mode 100644 index 0000000..eeaa272 Binary files /dev/null and b/libcity/cache/dataset_cache/dtw_PeMS07.npy differ diff --git a/libcity/cache/dataset_cache/dtw_PeMS08.npy b/libcity/cache/dataset_cache/dtw_PeMS08.npy new file mode 100644 index 0000000..998880b Binary files /dev/null and b/libcity/cache/dataset_cache/dtw_PeMS08.npy differ diff --git a/libcity/cache/dataset_cache/dtw_T-Drive.npy b/libcity/cache/dataset_cache/dtw_T-Drive.npy new file mode 100644 index 0000000..307df26 Binary files /dev/null and b/libcity/cache/dataset_cache/dtw_T-Drive.npy differ diff --git a/libcity/cache/dataset_cache/pattern_keys_kshape_CHIBike_14_3_16_5.npy b/libcity/cache/dataset_cache/pattern_keys_kshape_CHIBike_14_3_16_5.npy new file mode 100644 index 0000000..483c979 Binary files /dev/null and b/libcity/cache/dataset_cache/pattern_keys_kshape_CHIBike_14_3_16_5.npy differ diff --git a/libcity/cache/dataset_cache/pattern_keys_kshape_NYCTaxi_14_3_16_5.npy b/libcity/cache/dataset_cache/pattern_keys_kshape_NYCTaxi_14_3_16_5.npy new file mode 100644 index 0000000..b6e8a93 Binary files /dev/null and b/libcity/cache/dataset_cache/pattern_keys_kshape_NYCTaxi_14_3_16_5.npy differ diff --git a/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS04_14_3_16_5.npy b/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS04_14_3_16_5.npy new file mode 100644 index 0000000..338402c Binary files /dev/null and b/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS04_14_3_16_5.npy differ diff --git a/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS07_14_3_16_5.npy b/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS07_14_3_16_5.npy new file mode 100644 index 0000000..4b96f1e Binary files /dev/null and b/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS07_14_3_16_5.npy differ diff --git a/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS08_21_3_16_5.npy b/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS08_21_3_16_5.npy new file mode 100644 index 0000000..0b1ad1b Binary files /dev/null and b/libcity/cache/dataset_cache/pattern_keys_kshape_PeMS08_21_3_16_5.npy differ diff --git a/libcity/cache/dataset_cache/pattern_keys_kshape_T-Drive_14_3_16_5.npy b/libcity/cache/dataset_cache/pattern_keys_kshape_T-Drive_14_3_16_5.npy new file mode 100644 index 0000000..f6b92bf Binary files /dev/null and b/libcity/cache/dataset_cache/pattern_keys_kshape_T-Drive_14_3_16_5.npy differ diff --git a/libcity/config/__init__.py b/libcity/config/__init__.py new file mode 100644 index 0000000..6b6c96f --- /dev/null +++ b/libcity/config/__init__.py @@ -0,0 +1,5 @@ +from libcity.config.config_parser 
import ConfigParser + +__all__ = [ + 'ConfigParser' +] diff --git a/libcity/config/config_parser.py b/libcity/config/config_parser.py new file mode 100644 index 0000000..a09eec0 --- /dev/null +++ b/libcity/config/config_parser.py @@ -0,0 +1,134 @@ +import os +import json +import torch + + +class ConfigParser(object): + + def __init__(self, task, model, dataset, config_file=None, + saved_model=True, train=True, other_args=None, hyper_config_dict=None, initial_ckpt=None): + self.config = {} + self._parse_external_config(task, model, dataset, saved_model, train, other_args, hyper_config_dict, initial_ckpt) + self._parse_config_file(config_file) + self._load_default_config() + self._init_device() + + def _parse_external_config(self, task, model, dataset, + saved_model=True, train=True, other_args=None, hyper_config_dict=None, initial_ckpt=None): + if task is None: + raise ValueError('the parameter task should not be None!') + if model is None: + raise ValueError('the parameter model should not be None!') + if dataset is None: + raise ValueError('the parameter dataset should not be None!') + self.config['task'] = task + self.config['model'] = model + self.config['dataset'] = dataset + self.config['saved_model'] = saved_model + self.config['train'] = False if task == 'map_matching' else train + if other_args is not None: + for key in other_args: + self.config[key] = other_args[key] + if hyper_config_dict is not None: + for key in hyper_config_dict: + self.config[key] = hyper_config_dict[key] + self.config['initial_ckpt'] = initial_ckpt + + def _parse_config_file(self, config_file): + if config_file is not None: + if os.path.exists('./{}.json'.format(config_file)): + with open('./{}.json'.format(config_file), 'r') as f: + x = json.load(f) + for key in x: + if key not in self.config: + self.config[key] = x[key] + else: + raise FileNotFoundError( + 'Config file {}.json is not found. 
Please ensure \ + the config file is in the root dir and is a JSON \ + file.'.format(config_file)) + + def _load_default_config(self): + with open('./libcity/config/task_config.json', 'r') as f: + task_config = json.load(f) + if self.config['task'] not in task_config: + raise ValueError( + 'task {} is not supported.'.format(self.config['task'])) + task_config = task_config[self.config['task']] + if self.config['model'] not in task_config['allowed_model']: + raise ValueError('task {} do not support model {}'.format( + self.config['task'], self.config['model'])) + model = self.config['model'] + if 'dataset_class' not in self.config: + self.config['dataset_class'] = task_config[model]['dataset_class'] + if self.config['task'] == 'traj_loc_pred' and 'traj_encoder' not in self.config: + self.config['traj_encoder'] = task_config[model]['traj_encoder'] + if 'executor' not in self.config: + self.config['executor'] = task_config[model]['executor'] + if 'evaluator' not in self.config: + self.config['evaluator'] = task_config[model]['evaluator'] + if self.config['model'].upper() in ['LSTM', 'GRU', 'RNN']: + self.config['rnn_type'] = self.config['model'] + self.config['model'] = 'RNN' + default_file_list = [] + default_file_list.append('model/{}/{}.json'.format(self.config['task'], self.config['model'])) + default_file_list.append('data/{}.json'.format(self.config['dataset_class'])) + default_file_list.append('executor/{}.json'.format(self.config['executor'])) + default_file_list.append('evaluator/{}.json'.format(self.config['evaluator'])) + for file_name in default_file_list: + with open('./libcity/config/{}'.format(file_name), 'r') as f: + x = json.load(f) + for key in x: + if key not in self.config: + self.config[key] = x[key] + with open('./raw_data/{}/config.json'.format(self.config['dataset']), 'r') as f: + x = json.load(f) + for key in x: + if key == 'info': + for ik in x[key]: + if ik not in self.config: + self.config[ik] = x[key][ik] + else: + if key not in self.config: + self.config[key] = x[key] + + def _init_device(self): + use_gpu = self.config.get('gpu', True) + distributed = False + if 'WORLD_SIZE' in os.environ: + distributed = int(os.environ['WORLD_SIZE']) > 1 + self.config['distributed'] = distributed + if use_gpu and distributed: + local_rank = self.config['local_rank'] + assert local_rank >= 0 + torch.cuda.set_device(local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + rank = torch.distributed.get_rank() + self.config["rank"] = rank + assert rank >= 0 + self.config['world_size'] = torch.distributed.get_world_size() + self.config['device'] = torch.device( + "cuda:%d" % local_rank if torch.cuda.is_available() else "cpu") + else: + if use_gpu: + torch.cuda.set_device(0) + self.config['device'] = torch.device( + "cuda:0" if torch.cuda.is_available() and use_gpu else "cpu") + + def get(self, key, default=None): + return self.config.get(key, default) + + def __getitem__(self, key): + if key in self.config: + return self.config[key] + else: + raise KeyError('{} is not in the config'.format(key)) + + def __setitem__(self, key, value): + self.config[key] = value + + def __contains__(self, key): + return key in self.config + + def __iter__(self): + return self.config.__iter__() diff --git a/libcity/config/data/PDFormerDataset.json b/libcity/config/data/PDFormerDataset.json new file mode 100644 index 0000000..b21fa10 --- /dev/null +++ b/libcity/config/data/PDFormerDataset.json @@ -0,0 +1,17 @@ +{ + "batch_size": 64, + "cache_dataset": true, + 
"num_workers": 0, + "pad_with_last_sample": true, + "train_rate": 0.7, + "eval_rate": 0.1, + "scaler": "none", + "load_external": false, + "normal_external": false, + "ext_scaler": "none", + "input_window": 12, + "output_window": 12, + "add_time_in_day": false, + "add_day_in_week": false, + "lape_dim": 8 +} diff --git a/libcity/config/data/PDFormerGridDataset.json b/libcity/config/data/PDFormerGridDataset.json new file mode 100644 index 0000000..a8ca3fa --- /dev/null +++ b/libcity/config/data/PDFormerGridDataset.json @@ -0,0 +1,19 @@ +{ + "batch_size": 64, + "cache_dataset": true, + "num_workers": 0, + "pad_with_last_sample": true, + "train_rate": 0.7, + "eval_rate": 0.1, + "scaler": "none", + "load_external": false, + "normal_external": false, + "ext_scaler": "none", + "input_window": 6, + "output_window": 1, + "output_dim": 2, + "add_time_in_day": false, + "add_day_in_week": false, + "use_row_column": false, + "lape_dim": 8 +} \ No newline at end of file diff --git a/libcity/config/data/TrafficStateDataset.json b/libcity/config/data/TrafficStateDataset.json new file mode 100644 index 0000000..e7e8a95 --- /dev/null +++ b/libcity/config/data/TrafficStateDataset.json @@ -0,0 +1,16 @@ +{ + "batch_size": 64, + "cache_dataset": true, + "num_workers": 0, + "pad_with_last_sample": true, + "train_rate": 0.7, + "eval_rate": 0.1, + "scaler": "none", + "load_external": false, + "normal_external": false, + "ext_scaler": "none", + "input_window": 12, + "output_window": 12, + "add_time_in_day": false, + "add_day_in_week": false +} diff --git a/libcity/config/data/TrafficStateGridDataset.json b/libcity/config/data/TrafficStateGridDataset.json new file mode 100644 index 0000000..3fdf984 --- /dev/null +++ b/libcity/config/data/TrafficStateGridDataset.json @@ -0,0 +1,17 @@ +{ + "batch_size": 64, + "cache_dataset": true, + "num_workers": 0, + "pad_with_last_sample": true, + "train_rate": 0.7, + "eval_rate": 0.1, + "scaler": "none", + "load_external": false, + "normal_external": false, + "ext_scaler": "none", + "input_window": 12, + "output_window": 12, + "add_time_in_day": false, + "add_day_in_week": false, + "use_row_column": true +} diff --git a/libcity/config/data/TrafficStatePointDataset.json b/libcity/config/data/TrafficStatePointDataset.json new file mode 100644 index 0000000..e7e8a95 --- /dev/null +++ b/libcity/config/data/TrafficStatePointDataset.json @@ -0,0 +1,16 @@ +{ + "batch_size": 64, + "cache_dataset": true, + "num_workers": 0, + "pad_with_last_sample": true, + "train_rate": 0.7, + "eval_rate": 0.1, + "scaler": "none", + "load_external": false, + "normal_external": false, + "ext_scaler": "none", + "input_window": 12, + "output_window": 12, + "add_time_in_day": false, + "add_day_in_week": false +} diff --git a/libcity/config/evaluator/TrafficStateEvaluator.json b/libcity/config/evaluator/TrafficStateEvaluator.json new file mode 100644 index 0000000..af8204b --- /dev/null +++ b/libcity/config/evaluator/TrafficStateEvaluator.json @@ -0,0 +1,5 @@ +{ + "metrics": ["MAE", "MAPE", "RMSE", "masked_MAE", "masked_MAPE", "masked_RMSE"], + "mode": "single", + "save_modes": ["csv"] +} diff --git a/libcity/config/evaluator/TrafficStateGridEvaluator.json b/libcity/config/evaluator/TrafficStateGridEvaluator.json new file mode 100644 index 0000000..62b928d --- /dev/null +++ b/libcity/config/evaluator/TrafficStateGridEvaluator.json @@ -0,0 +1,6 @@ +{ + "metrics": ["MAE", "MAPE", "RMSE", "masked_MAE", "masked_MAPE", "masked_RMSE"], + "mode": "single", + "mask_val": 10, + "save_modes": ["csv"] +} \ No newline at 
end of file diff --git a/libcity/config/executor/AbstractTraditionExecutor.json b/libcity/config/executor/AbstractTraditionExecutor.json new file mode 100644 index 0000000..0e0dcd2 --- /dev/null +++ b/libcity/config/executor/AbstractTraditionExecutor.json @@ -0,0 +1,3 @@ +{ + +} \ No newline at end of file diff --git a/libcity/config/executor/PDFormerExecutor.json b/libcity/config/executor/PDFormerExecutor.json new file mode 100644 index 0000000..43686d8 --- /dev/null +++ b/libcity/config/executor/PDFormerExecutor.json @@ -0,0 +1,36 @@ +{ + "gpu": true, + "gpu_id": 0, + "max_epoch": 100, + "train_loss": "none", + "epoch": 0, + "learner": "adam", + "learning_rate": 0.01, + "weight_decay": 0, + "lr_epsilon": 1e-8, + "lr_beta1": 0.9, + "lr_beta2": 0.999, + "lr_alpha": 0.99, + "lr_momentum": 0, + "lr_decay": false, + "lr_scheduler": "multisteplr", + "lr_decay_ratio": 0.1, + "steps": [5, 20, 40, 70], + "step_size": 10, + "lr_T_max": 30, + "lr_eta_min": 0, + "lr_patience": 10, + "lr_threshold": 1e-4, + "lr_warmup_epoch": 5, + "lr_warmup_init": 1e-6, + "clip_grad_norm": false, + "max_grad_norm": 1.0, + "use_early_stop": false, + "patience": 50, + "log_level": "INFO", + "log_every": 1, + "saved_model": true, + "load_best_epoch": true, + "hyper_tune": false, + "grad_accmu_steps": 1 +} \ No newline at end of file diff --git a/libcity/config/executor/TrafficStateExecutor.json b/libcity/config/executor/TrafficStateExecutor.json new file mode 100644 index 0000000..f167506 --- /dev/null +++ b/libcity/config/executor/TrafficStateExecutor.json @@ -0,0 +1,33 @@ +{ + "gpu": true, + "gpu_id": 0, + "max_epoch": 100, + "train_loss": "none", + "epoch": 0, + "learner": "adam", + "learning_rate": 0.01, + "weight_decay": 0, + "lr_epsilon": 1e-8, + "lr_beta1": 0.9, + "lr_beta2": 0.999, + "lr_alpha": 0.99, + "lr_momentum": 0, + "lr_decay": false, + "lr_scheduler": "multisteplr", + "lr_decay_ratio": 0.1, + "steps": [5, 20, 40, 70], + "step_size": 10, + "lr_T_max": 30, + "lr_eta_min": 0, + "lr_patience": 10, + "lr_threshold": 1e-4, + "clip_grad_norm": false, + "max_grad_norm": 1.0, + "use_early_stop": false, + "patience": 50, + "log_level": "INFO", + "log_every": 1, + "saved_model": true, + "load_best_epoch": true, + "hyper_tune": false +} diff --git a/libcity/config/model/traffic_state_pred/PDFormer.json b/libcity/config/model/traffic_state_pred/PDFormer.json new file mode 100644 index 0000000..124266d --- /dev/null +++ b/libcity/config/model/traffic_state_pred/PDFormer.json @@ -0,0 +1,51 @@ +{ + "embed_dim": 64, + "skip_dim": 256, + "geo_num_heads": 4, + "sem_num_heads": 2, + "t_num_heads": 2, + "mlp_ratio": 4, + "qkv_bias": true, + "drop": 0, + "attn_drop": 0, + "drop_path": 0.3, + "s_attn_size": 3, + "t_attn_size": 1, + "enc_depth": 6, + "type_ln": "post", + "type_short_path": "hop", + + "scaler": "standard", + "load_external": true, + "normal_external": false, + "ext_scaler": "none", + "add_time_in_day": true, + "add_day_in_week": true, + "batch_size": 16, + + "seed": 0, + "max_epoch": 300, + "learner": "adamw", + "learning_rate": 1e-3, + "weight_decay": 0.05, + "lr_decay": true, + "lr_scheduler": "cosinelr", + "lr_eta_min": 1e-4, + "lr_decay_ratio": 0.1, + "lr_warmup_epoch": 5, + "lr_warmup_init": 1e-6, + "clip_grad_norm": true, + "max_grad_norm": 5, + "use_early_stop": true, + "patience": 50, + "step_size": 1562, + "task_level": 0, + "use_curriculum_learning": true, + "random_flip": true, + "huber_delta": 1, + "quan_delta": 0.25, + "bidir": false, + "far_mask_delta": 5, + "dtw_delta": 5, + "set_loss": 
"masked_mae" +} \ No newline at end of file diff --git a/libcity/config/task_config.json b/libcity/config/task_config.json new file mode 100644 index 0000000..da0762c --- /dev/null +++ b/libcity/config/task_config.json @@ -0,0 +1,16 @@ +{ + "traffic_state_pred": { + "allowed_model": [ + "PDFormer" + ], + "allowed_dataset": [ + "PeMS04", "PeMS07", "PeMS08", + "NYCTaxi", "T-Drive", "CHIBike" + ], + "PDFormer": { + "dataset_class": "PDFormerDataset", + "executor": "PDFormerExecutor", + "evaluator": "TrafficStateEvaluator" + } + } +} diff --git a/libcity/data/__init__.py b/libcity/data/__init__.py new file mode 100644 index 0000000..a45c96d --- /dev/null +++ b/libcity/data/__init__.py @@ -0,0 +1,5 @@ +from libcity.data.utils import get_dataset + +__all__ = [ + "get_dataset" +] diff --git a/libcity/data/batch.py b/libcity/data/batch.py new file mode 100644 index 0000000..6ee1974 --- /dev/null +++ b/libcity/data/batch.py @@ -0,0 +1,98 @@ +import torch +import numpy as np + + +class Batch(object): + + def __init__(self, feature_name, pad_item=None, pad_max_len=None): + self.data = {} + self.pad_len = {} + self.origin_len = {} + self.pad_max_len = pad_max_len if pad_max_len is not None else {} + self.pad_item = pad_item if pad_item is not None else {} + self.feature_name = feature_name + for key in feature_name: + self.data[key] = [] + if key in self.pad_item: + self.pad_len[key] = 0 + self.origin_len[key] = [] + + def __getitem__(self, key): + if key in self.data: + return self.data[key] + else: + raise KeyError('{} is not in the batch'.format(key)) + + def __setitem__(self, key, value): + if key in self.data: + self.data[key] = value + else: + raise KeyError('{} is not in the batch'.format(key)) + + def append(self, item): + if len(item) != len(self.feature_name): + raise KeyError( + 'when append a batch, item is not equal length with \ + feature_name') + for i, key in enumerate(self.feature_name): + self.data[key].append(item[i]) + if key in self.pad_item: + self.origin_len[key].append(len(item[i])) + if self.pad_len[key] < len(item[i]): + self.pad_len[key] = len(item[i]) + + def padding(self): + for key in self.pad_item: + if key not in self.data: + raise KeyError('when pad a batch, raise this error!') + max_len = self.pad_len[key] + if key in self.pad_max_len: + max_len = min(self.pad_max_len[key], max_len) + for i in range(len(self.data[key])): + if len(self.data[key][i]) < max_len: + self.data[key][i] += [self.pad_item[key]] * \ + (max_len - len(self.data[key][i])) + else: + self.data[key][i] = self.data[key][i][-max_len:] + self.origin_len[key][i] = max_len + + def get_origin_len(self, key): + return self.origin_len[key] + + def to_tensor(self, device): + for key in self.data: + if self.feature_name[key] == 'int': + self.data[key] = torch.LongTensor(np.array(self.data[key])).to(device) + elif self.feature_name[key] == 'float': + self.data[key] = torch.FloatTensor(np.array(self.data[key])).to(device) + elif self.feature_name[key] == 'array of int': + for i in range(len(self.data[key])): + for j in range(len(self.data[key][i])): + try: + self.data[key][i][j] = torch.LongTensor(np.array(self.data[key][i][j])).to(device) + except TypeError: + print('device is ', device) + exit() + elif self.feature_name[key] == 'no_pad_int': + for i in range(len(self.data[key])): + self.data[key][i] = torch.LongTensor(np.array(self.data[key][i])).to(device) + elif self.feature_name[key] == 'no_pad_float': + for i in range(len(self.data[key])): + self.data[key][i] = 
torch.FloatTensor(np.array(self.data[key][i])).to(device) + elif self.feature_name[key] == 'no_tensor': + pass + else: + raise TypeError( + 'Batch to_tensor, only support int, float, array of int, no_pad_float.\ + and you give {}'.format(self.feature_name[key])) + + def to_ndarray(self): + for key in self.data: + if self.feature_name[key] == 'int': + self.data[key] = np.array(self.data[key]) + elif self.feature_name[key] == 'float': + self.data[key] = np.array(self.data[key]) + else: + raise TypeError( + 'Batch to_tensor, only support int, float, array of int, no_pad_float.\ + and you give {}'.format(self.feature_name[key])) \ No newline at end of file diff --git a/libcity/data/dataset/__init__.py b/libcity/data/dataset/__init__.py new file mode 100644 index 0000000..972ae1a --- /dev/null +++ b/libcity/data/dataset/__init__.py @@ -0,0 +1,18 @@ +from libcity.data.dataset.abstract_dataset import AbstractDataset +from libcity.data.dataset.traffic_state_datatset import TrafficStateDataset +from libcity.data.dataset.traffic_state_point_dataset import \ + TrafficStatePointDataset +from libcity.data.dataset.traffic_state_grid_dataset import \ + TrafficStateGridDataset +from libcity.data.dataset.pdformer_dataset import PDFormerDataset +from libcity.data.dataset.pdformer_grid_dataset import PDFormerGridDataset + + +__all__ = [ + "AbstractDataset", + "TrafficStateDataset", + "TrafficStatePointDataset", + "TrafficStateGridDataset", + "PDFormerDataset", + "PDFormerGridDataset", +] diff --git a/libcity/data/dataset/abstract_dataset.py b/libcity/data/dataset/abstract_dataset.py new file mode 100644 index 0000000..8932957 --- /dev/null +++ b/libcity/data/dataset/abstract_dataset.py @@ -0,0 +1,10 @@ +class AbstractDataset(object): + + def __init__(self, config): + raise NotImplementedError("Dataset not implemented") + + def get_data(self): + raise NotImplementedError("get_data not implemented") + + def get_data_feature(self): + raise NotImplementedError("get_data_feature not implemented") \ No newline at end of file diff --git a/libcity/data/dataset/pdformer_dataset.py b/libcity/data/dataset/pdformer_dataset.py new file mode 100644 index 0000000..cbb3b2b --- /dev/null +++ b/libcity/data/dataset/pdformer_dataset.py @@ -0,0 +1,137 @@ +import os +import numpy as np +from fastdtw import fastdtw +from tqdm import tqdm +from libcity.data.dataset import TrafficStatePointDataset +from libcity.data.utils import generate_dataloader +from tslearn.clustering import TimeSeriesKMeans, KShape + + +class PDFormerDataset(TrafficStatePointDataset): + + def __init__(self, config): + self.type_short_path = config.get('type_short_path', 'hop') + super().__init__(config) + self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/', + 'pdformer_point_based_{}.npz'.format(self.parameters_str)) + self.points_per_hour = 3600 // self.time_intervals + self.dtw_matrix = self._get_dtw() + self.points_per_day = 24 * 3600 // self.time_intervals + self.cand_key_days = config.get("cand_key_days", 14) + self.s_attn_size = config.get("s_attn_size", 3) + self.n_cluster = config.get("n_cluster", 16) + self.cluster_max_iter = config.get("cluster_max_iter", 5) + self.cluster_method = config.get("cluster_method", "kshape") + + def _get_dtw(self): + cache_path = './libcity/cache/dataset_cache/dtw_' + self.dataset + '.npy' + for ind, filename in enumerate(self.data_files): + if ind == 0: + df = self._load_dyna(filename) + else: + df = np.concatenate((df, self._load_dyna(filename)), axis=0) + if not os.path.exists(cache_path): + data_mean 
= np.mean( + [df[24 * self.points_per_hour * i: 24 * self.points_per_hour * (i + 1)] + for i in range(df.shape[0] // (24 * self.points_per_hour))], axis=0) + dtw_distance = np.zeros((self.num_nodes, self.num_nodes)) + for i in tqdm(range(self.num_nodes)): + for j in range(i, self.num_nodes): + dtw_distance[i][j], _ = fastdtw(data_mean[:, i, :], data_mean[:, j, :], radius=6) + for i in range(self.num_nodes): + for j in range(i): + dtw_distance[i][j] = dtw_distance[j][i] + np.save(cache_path, dtw_distance) + dtw_matrix = np.load(cache_path) + self._logger.info('Load DTW matrix from {}'.format(cache_path)) + return dtw_matrix + + def _load_rel(self): + self.sd_mx = None + super()._load_rel() + self._logger.info('Max adj_mx value = {}'.format(self.adj_mx.max())) + self.sh_mx = self.adj_mx.copy() + if self.type_short_path == 'hop': + self.sh_mx[self.sh_mx > 0] = 1 + self.sh_mx[self.sh_mx == 0] = 511 + for i in range(self.num_nodes): + self.sh_mx[i, i] = 0 + for k in range(self.num_nodes): + for i in range(self.num_nodes): + for j in range(self.num_nodes): + self.sh_mx[i, j] = min(self.sh_mx[i, j], self.sh_mx[i, k] + self.sh_mx[k, j], 511) + np.save('{}.npy'.format(self.dataset), self.sh_mx) + + def _calculate_adjacency_matrix(self): + self._logger.info("Start Calculate the weight by Gauss kernel!") + self.sd_mx = self.adj_mx.copy() + distances = self.adj_mx[~np.isinf(self.adj_mx)].flatten() + std = distances.std() + self.adj_mx = np.exp(-np.square(self.adj_mx / std)) + self.adj_mx[self.adj_mx < self.weight_adj_epsilon] = 0 + if self.type_short_path == 'dist': + self.sd_mx[self.adj_mx == 0] = np.inf + for k in range(self.num_nodes): + for i in range(self.num_nodes): + for j in range(self.num_nodes): + self.sd_mx[i, j] = min(self.sd_mx[i, j], self.sd_mx[i, k] + self.sd_mx[k, j]) + + def get_data(self): + x_train, y_train, x_val, y_val, x_test, y_test = [], [], [], [], [], [] + if self.data is None: + self.data = {} + if self.cache_dataset and os.path.exists(self.cache_file_name): + x_train, y_train, x_val, y_val, x_test, y_test = self._load_cache_train_val_test() + else: + x_train, y_train, x_val, y_val, x_test, y_test = self._generate_train_val_test() + self.feature_dim = x_train.shape[-1] + self.ext_dim = self.feature_dim - self.output_dim + self.scaler = self._get_scalar(self.scaler_type, + x_train[..., :self.output_dim], y_train[..., :self.output_dim]) + self.ext_scaler = self._get_scalar(self.ext_scaler_type, + x_train[..., self.output_dim:], y_train[..., self.output_dim:]) + x_train[..., :self.output_dim] = self.scaler.transform(x_train[..., :self.output_dim]) + y_train[..., :self.output_dim] = self.scaler.transform(y_train[..., :self.output_dim]) + x_val[..., :self.output_dim] = self.scaler.transform(x_val[..., :self.output_dim]) + y_val[..., :self.output_dim] = self.scaler.transform(y_val[..., :self.output_dim]) + x_test[..., :self.output_dim] = self.scaler.transform(x_test[..., :self.output_dim]) + y_test[..., :self.output_dim] = self.scaler.transform(y_test[..., :self.output_dim]) + if self.normal_external: + x_train[..., self.output_dim:] = self.ext_scaler.transform(x_train[..., self.output_dim:]) + y_train[..., self.output_dim:] = self.ext_scaler.transform(y_train[..., self.output_dim:]) + x_val[..., self.output_dim:] = self.ext_scaler.transform(x_val[..., self.output_dim:]) + y_val[..., self.output_dim:] = self.ext_scaler.transform(y_val[..., self.output_dim:]) + x_test[..., self.output_dim:] = self.ext_scaler.transform(x_test[..., self.output_dim:]) + y_test[..., self.output_dim:] = 
self.ext_scaler.transform(y_test[..., self.output_dim:]) + train_data = list(zip(x_train, y_train)) + eval_data = list(zip(x_val, y_val)) + test_data = list(zip(x_test, y_test)) + self.train_dataloader, self.eval_dataloader, self.test_dataloader = \ + generate_dataloader(train_data, eval_data, test_data, self.feature_name, + self.batch_size, self.num_workers, pad_with_last_sample=self.pad_with_last_sample, + distributed=self.distributed) + self.num_batches = len(self.train_dataloader) + self.pattern_key_file = os.path.join( + './libcity/cache/dataset_cache/', 'pattern_keys_{}_{}_{}_{}_{}_{}'.format( + self.cluster_method, self.dataset, self.cand_key_days, self.s_attn_size, self.n_cluster, self.cluster_max_iter)) + if not os.path.exists(self.pattern_key_file + '.npy'): + cand_key_time_steps = self.cand_key_days * self.points_per_day + pattern_cand_keys = x_train[:cand_key_time_steps, :self.s_attn_size, :, :self.output_dim].swapaxes(1, 2).reshape(-1, self.s_attn_size, self.output_dim) + self._logger.info("Clustering...") + if self.cluster_method == "kshape": + km = KShape(n_clusters=self.n_cluster, max_iter=self.cluster_max_iter).fit(pattern_cand_keys) + else: + km = TimeSeriesKMeans(n_clusters=self.n_cluster, metric="softdtw", max_iter=self.cluster_max_iter).fit(pattern_cand_keys) + self.pattern_keys = km.cluster_centers_ + np.save(self.pattern_key_file, self.pattern_keys) + self._logger.info("Saved at file " + self.pattern_key_file + ".npy") + else: + self.pattern_keys = np.load(self.pattern_key_file + ".npy") + self._logger.info("Loaded file " + self.pattern_key_file + ".npy") + return self.train_dataloader, self.eval_dataloader, self.test_dataloader + + def get_data_feature(self): + return {"scaler": self.scaler, "adj_mx": self.adj_mx, "sd_mx": self.sd_mx, "sh_mx": self.sh_mx, + "ext_dim": self.ext_dim, "num_nodes": self.num_nodes, "feature_dim": self.feature_dim, + "output_dim": self.output_dim, "num_batches": self.num_batches, + "dtw_matrix": self.dtw_matrix, "pattern_keys": self.pattern_keys} diff --git a/libcity/data/dataset/pdformer_grid_dataset.py b/libcity/data/dataset/pdformer_grid_dataset.py new file mode 100644 index 0000000..7d1fd16 --- /dev/null +++ b/libcity/data/dataset/pdformer_grid_dataset.py @@ -0,0 +1,127 @@ +import os +import numpy as np +from fastdtw import fastdtw +from tqdm import tqdm +from libcity.data.dataset import TrafficStateGridDataset +from libcity.data.utils import generate_dataloader +from tslearn.clustering import TimeSeriesKMeans, KShape + + +class PDFormerGridDataset(TrafficStateGridDataset): + + def __init__(self, config): + self.type_short_path = config.get('type_short_path', 'dist') + super().__init__(config) + self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/', + 'pdformer_grid_based_{}.npz'.format(self.parameters_str)) + self.points_per_hour = 3600 // self.time_intervals + self.dtw_matrix = self._get_dtw() + self.points_per_day = 24 * 3600 // self.time_intervals + self.cand_key_days = config.get("cand_key_days", 14) + self.s_attn_size = config.get("s_attn_size", 3) + self.n_cluster = config.get("n_cluster", 16) + self.cluster_max_iter = config.get("cluster_max_iter", 5) + self.cluster_method = config.get("cluster_method", "kshape") + + def _get_dtw(self): + cache_path = './libcity/cache/dataset_cache/dtw_' + self.dataset + '.npy' + for ind, filename in enumerate(self.data_files): + if ind == 0: + df = self._load_dyna(filename) + else: + df = np.concatenate((df, self._load_dyna(filename)), axis=0) + if not 
os.path.exists(cache_path): + data_mean = np.mean( + [df[24 * self.points_per_hour * i: 24 * self.points_per_hour * (i + 1)] + for i in range(df.shape[0] // (24 * self.points_per_hour))], axis=0) + dtw_distance = np.zeros((self.num_nodes, self.num_nodes)) + for i in tqdm(range(self.num_nodes)): + for j in range(i, self.num_nodes): + dtw_distance[i][j], _ = fastdtw(data_mean[:, i, :], data_mean[:, j, :], radius=6) + for i in range(self.num_nodes): + for j in range(i): + dtw_distance[i][j] = dtw_distance[j][i] + np.save(cache_path, dtw_distance) + dtw_matrix = np.load(cache_path) + self._logger.info('Load DTW matrix from {}'.format(cache_path)) + return dtw_matrix + + def _load_rel(self): + self.sd_mx = None + super()._load_grid_rel() + self._logger.info('Max adj_mx value = {}'.format(self.adj_mx.max())) + self.sh_mx = self.adj_mx.copy() + if self.type_short_path == 'hop': + self.sh_mx[self.sh_mx > 0] = 1 + self.sh_mx[self.sh_mx == 0] = 511 + for i in range(self.num_nodes): + self.sh_mx[i, i] = 0 + for i in range(self.num_nodes): + for j in range(self.num_nodes): + i_x, i_y = i // self.len_column, i % self.len_column + j_x, j_y = j // self.len_column, j % self.len_column + self.sh_mx[i, j] = min(max(abs(i_x - j_x), abs(i_y - j_y)), 511) + + def get_data(self): + x_train, y_train, x_val, y_val, x_test, y_test = [], [], [], [], [], [] + if self.data is None: + self.data = {} + if self.cache_dataset and os.path.exists(self.cache_file_name): + x_train, y_train, x_val, y_val, x_test, y_test = self._load_cache_train_val_test() + else: + x_train, y_train, x_val, y_val, x_test, y_test = self._generate_train_val_test() + self.feature_dim = x_train.shape[-1] + self.ext_dim = self.feature_dim - self.output_dim + self.scaler = self._get_scalar(self.scaler_type, + x_train[..., :self.output_dim], y_train[..., :self.output_dim]) + self.ext_scaler = self._get_scalar(self.ext_scaler_type, + x_train[..., self.output_dim:], y_train[..., self.output_dim:]) + x_train[..., :self.output_dim] = self.scaler.transform(x_train[..., :self.output_dim]) + y_train[..., :self.output_dim] = self.scaler.transform(y_train[..., :self.output_dim]) + x_val[..., :self.output_dim] = self.scaler.transform(x_val[..., :self.output_dim]) + y_val[..., :self.output_dim] = self.scaler.transform(y_val[..., :self.output_dim]) + x_test[..., :self.output_dim] = self.scaler.transform(x_test[..., :self.output_dim]) + y_test[..., :self.output_dim] = self.scaler.transform(y_test[..., :self.output_dim]) + if self.normal_external: + x_train[..., self.output_dim:] = self.ext_scaler.transform(x_train[..., self.output_dim:]) + y_train[..., self.output_dim:] = self.ext_scaler.transform(y_train[..., self.output_dim:]) + x_val[..., self.output_dim:] = self.ext_scaler.transform(x_val[..., self.output_dim:]) + y_val[..., self.output_dim:] = self.ext_scaler.transform(y_val[..., self.output_dim:]) + x_test[..., self.output_dim:] = self.ext_scaler.transform(x_test[..., self.output_dim:]) + y_test[..., self.output_dim:] = self.ext_scaler.transform(y_test[..., self.output_dim:]) + train_data = list(zip(x_train, y_train)) + eval_data = list(zip(x_val, y_val)) + test_data = list(zip(x_test, y_test)) + self.train_dataloader, self.eval_dataloader, self.test_dataloader = \ + generate_dataloader(train_data, eval_data, test_data, self.feature_name, + self.batch_size, self.num_workers, pad_with_last_sample=self.pad_with_last_sample, + distributed=self.distributed) + self.num_batches = len(self.train_dataloader) + self.pattern_key_file = os.path.join( + 
'./libcity/cache/dataset_cache/', 'pattern_keys_{}_{}_{}_{}_{}_{}'.format( + self.cluster_method, self.dataset, self.cand_key_days, self.s_attn_size, self.n_cluster, self.cluster_max_iter)) + if not os.path.exists(self.pattern_key_file + ".npy"): + cand_key_time_steps = self.cand_key_days * self.points_per_day + pattern_cand_keys = x_train[:cand_key_time_steps, :self.s_attn_size, :, :self.output_dim].swapaxes(1, 2).reshape(-1, self.s_attn_size, self.output_dim) + self._logger.info("Clustering...") + if self.cluster_method == "kshape": + pattern_key_list = [] + for i in range(self.output_dim): + km = KShape(n_clusters=self.n_cluster, max_iter=self.cluster_max_iter).fit(pattern_cand_keys[..., i: i+1]) + pattern_key_list.append(km.cluster_centers_) + self.pattern_keys = np.concatenate(pattern_key_list, axis=-1) + else: + km = TimeSeriesKMeans(n_clusters=self.n_cluster, metric="euclidean", max_iter=self.cluster_max_iter).fit(pattern_cand_keys) + self.pattern_keys = km.cluster_centers_ + np.save(self.pattern_key_file, self.pattern_keys) + self._logger.info("Saved at file " + self.pattern_key_file + ".npy") + else: + self.pattern_keys = np.load(self.pattern_key_file + ".npy") + self._logger.info("Loaded file " + self.pattern_key_file + ".npy") + return self.train_dataloader, self.eval_dataloader, self.test_dataloader + + def get_data_feature(self): + return {"scaler": self.scaler, "adj_mx": self.adj_mx, "sd_mx": self.sd_mx, "sh_mx": self.sh_mx, + "ext_dim": self.ext_dim, "num_nodes": self.num_nodes, "feature_dim": self.feature_dim, + "output_dim": self.output_dim, "num_batches": self.num_batches, + "dtw_matrix": self.dtw_matrix, "pattern_keys": self.pattern_keys} diff --git a/libcity/data/dataset/traffic_state_datatset.py b/libcity/data/dataset/traffic_state_datatset.py new file mode 100644 index 0000000..20923f1 --- /dev/null +++ b/libcity/data/dataset/traffic_state_datatset.py @@ -0,0 +1,665 @@ +import os +import pandas as pd +import numpy as np +import datetime +from logging import getLogger + +from libcity.data.dataset import AbstractDataset +from libcity.data.utils import generate_dataloader +from libcity.utils import StandardScaler, NormalScaler, NoneScaler, \ + MinMax01Scaler, MinMax11Scaler, LogScaler, ensure_dir + + +class TrafficStateDataset(AbstractDataset): + + def __init__(self, config): + self.config = config + self.dataset = self.config.get('dataset', '') + self.batch_size = self.config.get('batch_size', 64) + self.cache_dataset = self.config.get('cache_dataset', True) + self.num_workers = self.config.get('num_workers', 0) + self.pad_with_last_sample = self.config.get('pad_with_last_sample', True) + self.train_rate = self.config.get('train_rate', 0.7) + self.part_train_rate = self.config.get("part_train_rate", 1) + self.eval_rate = self.config.get('eval_rate', 0.1) + self.scaler_type = self.config.get('scaler', 'none') + self.ext_scaler_type = self.config.get('ext_scaler', 'none') + self.load_external = self.config.get('load_external', False) + self.normal_external = self.config.get('normal_external', False) + self.add_time_in_day = self.config.get('add_time_in_day', False) + self.add_day_in_week = self.config.get('add_day_in_week', False) + self.input_window = self.config.get('input_window', 12) + self.output_window = self.config.get('output_window', 12) + self.bidir = self.config.get('bidir', False) + self.data_col = self.config.get('data_col', '') + self.parameters_str = \ + str(self.dataset) + '_' + str(self.input_window) + '_' + str(self.output_window) + '_' \ + + 
str(self.train_rate) + '_' + str(self.part_train_rate) + '_' + str(self.eval_rate) + '_' + str(self.scaler_type) + '_' \ + + str(self.batch_size) + '_' + str(self.load_external) + '_' + str(self.add_time_in_day) + '_' \ + + str(self.add_day_in_week) + '_' + str(self.pad_with_last_sample) + '_' + str("".join(self.data_col)) + self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/', + 'traffic_state_{}.npz'.format(self.parameters_str)) + self.cache_file_folder = './libcity/cache/dataset_cache/' + ensure_dir(self.cache_file_folder) + self.data_path = './raw_data/' + self.dataset + '/' + if not os.path.exists(self.data_path): + raise ValueError("Dataset {} not exist! Please ensure the path " + "'./raw_data/{}/' exist!".format(self.dataset, self.dataset)) + self.weight_col = self.config.get('weight_col', '') + self.ext_col = self.config.get('ext_col', '') + self.geo_file = self.config.get('geo_file', self.dataset) + self.rel_file = self.config.get('rel_file', self.dataset) + self.data_files = self.config.get('data_files', self.dataset) + self.ext_file = self.config.get('ext_file', self.dataset) + self.output_dim = self.config.get('output_dim', 1) + self.time_intervals = self.config.get('time_intervals', 300) + self.init_weight_inf_or_zero = self.config.get('init_weight_inf_or_zero', 'inf') + self.set_weight_link_or_dist = self.config.get('set_weight_link_or_dist', 'dist') + self.calculate_weight_adj = self.config.get('calculate_weight_adj', False) + self.weight_adj_epsilon = self.config.get('weight_adj_epsilon', 0.1) + self.data = None + self.feature_name = {'X': 'float', 'y': 'float'} + self.adj_mx = None + self.scaler = None + self.ext_scaler = None + self.feature_dim = 0 + self.ext_dim = 0 + self.num_nodes = 0 + self.num_batches = 0 + self._logger = getLogger() + self.rank = self.config.get('rank', 0) + self.distributed = self.config.get('distributed', False) + if os.path.exists(self.data_path + self.geo_file + '.geo'): + self._load_geo() + else: + raise ValueError('Not found .geo file!') + if os.path.exists(self.data_path + self.rel_file + '.rel'): + self._load_rel() + else: + self.adj_mx = np.zeros((len(self.geo_ids), len(self.geo_ids)), dtype=np.float32) + + def _load_geo(self): + geofile = pd.read_csv(self.data_path + self.geo_file + '.geo') + self.geo_ids = list(geofile['geo_id']) + self.num_nodes = len(self.geo_ids) + self.geo_to_ind = {} + for index, idx in enumerate(self.geo_ids): + self.geo_to_ind[idx] = index + self._logger.info("Loaded file " + self.geo_file + '.geo' + ', num_nodes=' + str(len(self.geo_ids))) + + def _load_grid_geo(self): + geofile = pd.read_csv(self.data_path + self.geo_file + '.geo') + self.geo_ids = list(geofile['geo_id']) + self.num_nodes = len(self.geo_ids) + self.geo_to_ind = {} + self.geo_to_rc = {} + for index, idx in enumerate(self.geo_ids): + self.geo_to_ind[idx] = index + for i in range(geofile.shape[0]): + self.geo_to_rc[geofile['geo_id'][i]] = [geofile['row_id'][i], geofile['column_id'][i]] + self.len_row = max(list(geofile['row_id'])) + 1 + self.len_column = max(list(geofile['column_id'])) + 1 + self._logger.info("Loaded file " + self.geo_file + '.geo' + ', num_grids=' + str(len(self.geo_ids)) + + ', grid_size=' + str((self.len_row, self.len_column))) + + def _load_rel(self): + relfile = pd.read_csv(self.data_path + self.rel_file + '.rel') + self._logger.info('set_weight_link_or_dist: {}'.format(self.set_weight_link_or_dist)) + self._logger.info('init_weight_inf_or_zero: {}'.format(self.init_weight_inf_or_zero)) + if self.weight_col != 
'': + if isinstance(self.weight_col, list): + if len(self.weight_col) != 1: + raise ValueError('`weight_col` parameter must be only one column!') + self.weight_col = self.weight_col[0] + self.distance_df = relfile[~relfile[self.weight_col].isna()][[ + 'origin_id', 'destination_id', self.weight_col]] + else: + if len(relfile.columns) != 5: + raise ValueError("Don't know which column to be loaded! Please set `weight_col` parameter!") + else: + self.weight_col = relfile.columns[-1] + self.distance_df = relfile[~relfile[self.weight_col].isna()][[ + 'origin_id', 'destination_id', self.weight_col]] + self.adj_mx = np.zeros((len(self.geo_ids), len(self.geo_ids)), dtype=np.float32) + if self.init_weight_inf_or_zero.lower() == 'inf' and self.set_weight_link_or_dist.lower() != 'link': + self.adj_mx[:] = np.inf + for row in self.distance_df.values: + if row[0] not in self.geo_to_ind or row[1] not in self.geo_to_ind: + continue + if self.set_weight_link_or_dist.lower() == 'dist': + self.adj_mx[self.geo_to_ind[row[0]], self.geo_to_ind[row[1]]] = row[2] + if self.bidir: + self.adj_mx[self.geo_to_ind[row[1]], self.geo_to_ind[row[0]]] = row[2] + else: + self.adj_mx[self.geo_to_ind[row[0]], self.geo_to_ind[row[1]]] = 1 + if self.bidir: + self.adj_mx[self.geo_to_ind[row[1]], self.geo_to_ind[row[0]]] = 1 + self._logger.info("Loaded file " + self.rel_file + '.rel, shape=' + str(self.adj_mx.shape)) + if self.calculate_weight_adj: + self._calculate_adjacency_matrix() + + def _load_grid_rel(self): + self.adj_mx = np.zeros((len(self.geo_ids), len(self.geo_ids)), dtype=np.float32) + dirs = [[0, 1], [1, 0], [-1, 0], [0, -1], [1, 1], [1, -1], [-1, 1], [-1, -1]] + for i in range(self.len_row): + for j in range(self.len_column): + index = i * self.len_column + j + for d in dirs: + nei_i = i + d[0] + nei_j = j + d[1] + if nei_i >= 0 and nei_i < self.len_row and nei_j >= 0 and nei_j < self.len_column: + nei_index = nei_i * self.len_column + nei_j + self.adj_mx[index][nei_index] = 1 + self.adj_mx[nei_index][index] = 1 + self._logger.info("Generate grid rel file, shape=" + str(self.adj_mx.shape)) + + def _calculate_adjacency_matrix(self): + self._logger.info("Start Calculate the weight by Gauss kernel!") + distances = self.adj_mx[~np.isinf(self.adj_mx)].flatten() + std = distances.std() + self.adj_mx = np.exp(-np.square(self.adj_mx / std)) + self.adj_mx[self.adj_mx < self.weight_adj_epsilon] = 0 + + def _load_dyna(self, filename): + raise NotImplementedError('Please implement the function `_load_dyna()`.') + + def _load_dyna_3d(self, filename): + self._logger.info("Loading file " + filename + '.dyna') + dynafile = pd.read_csv(self.data_path + filename + '.dyna') + if self.data_col != '': + if isinstance(self.data_col, list): + data_col = self.data_col.copy() + else: + data_col = [self.data_col].copy() + data_col.insert(0, 'time') + data_col.insert(1, 'entity_id') + dynafile = dynafile[data_col] + else: + dynafile = dynafile[dynafile.columns[2:]] + self.timesolts = list(dynafile['time'][:int(dynafile.shape[0] / len(self.geo_ids))]) + self.idx_of_timesolts = dict() + if not dynafile['time'].isna().any(): + self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts)) + self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]') + for idx, _ts in enumerate(self.timesolts): + self.idx_of_timesolts[_ts] = idx + feature_dim = len(dynafile.columns) - 2 + df = dynafile[dynafile.columns[-feature_dim:]] + len_time = len(self.timesolts) + data = [] + for i in range(0, df.shape[0], len_time): + 
data.append(df[i:i+len_time].values) + data = np.array(data, dtype=np.float) + data = data.swapaxes(0, 1) + self._logger.info("Loaded file " + filename + '.dyna' + ', shape=' + str(data.shape)) + return data + + def _load_grid_3d(self, filename): + self._logger.info("Loading file " + filename + '.grid') + gridfile = pd.read_csv(self.data_path + filename + '.grid') + if self.data_col != '': + if isinstance(self.data_col, list): + data_col = self.data_col.copy() + else: + data_col = [self.data_col].copy() + data_col.insert(0, 'time') + data_col.insert(1, 'row_id') + data_col.insert(2, 'column_id') + gridfile = gridfile[data_col] + else: + gridfile = gridfile[gridfile.columns[2:]] + self.timesolts = list(gridfile['time'][:int(gridfile.shape[0] / len(self.geo_ids))]) + self.idx_of_timesolts = dict() + if not gridfile['time'].isna().any(): + self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts)) + self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]') + for idx, _ts in enumerate(self.timesolts): + self.idx_of_timesolts[_ts] = idx + feature_dim = len(gridfile.columns) - 3 + df = gridfile[gridfile.columns[-feature_dim:]] + len_time = len(self.timesolts) + data = [] + for i in range(0, df.shape[0], len_time): + data.append(df[i:i + len_time].values) + data = np.array(data, dtype=np.float) + data = data.swapaxes(0, 1) + self._logger.info("Loaded file " + filename + '.grid' + ', shape=' + str(data.shape)) + return data + + def _load_grid_4d(self, filename): + self._logger.info("Loading file " + filename + '.grid') + gridfile = pd.read_csv(self.data_path + filename + '.grid') + if self.data_col != '': + if isinstance(self.data_col, list): + data_col = self.data_col.copy() + else: + data_col = [self.data_col].copy() + data_col.insert(0, 'time') + data_col.insert(1, 'row_id') + data_col.insert(2, 'column_id') + gridfile = gridfile[data_col] + else: + gridfile = gridfile[gridfile.columns[2:]] + self.timesolts = list(gridfile['time'][:int(gridfile.shape[0] / len(self.geo_ids))]) + self.idx_of_timesolts = dict() + if not gridfile['time'].isna().any(): + self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts)) + self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]') + for idx, _ts in enumerate(self.timesolts): + self.idx_of_timesolts[_ts] = idx + feature_dim = len(gridfile.columns) - 3 + df = gridfile[gridfile.columns[-feature_dim:]] + len_time = len(self.timesolts) + data = [] + for i in range(self.len_row): + tmp = [] + for j in range(self.len_column): + index = (i * self.len_column + j) * len_time + tmp.append(df[index:index + len_time].values) + data.append(tmp) + data = np.array(data, dtype=np.float) + data = data.swapaxes(2, 0).swapaxes(1, 2) + self._logger.info("Loaded file " + filename + '.grid' + ', shape=' + str(data.shape)) + return data + + def _load_od_4d(self, filename): + self._logger.info("Loading file " + filename + '.od') + odfile = pd.read_csv(self.data_path + filename + '.od') + if self.data_col != '': + if isinstance(self.data_col, list): + data_col = self.data_col.copy() + else: + data_col = [self.data_col].copy() + data_col.insert(0, 'time') + data_col.insert(1, 'origin_id') + data_col.insert(2, 'destination_id') + odfile = odfile[data_col] + else: + odfile = odfile[odfile.columns[2:]] + self.timesolts = list(odfile['time'][:int(odfile.shape[0] / self.num_nodes / self.num_nodes)]) + self.idx_of_timesolts = dict() + if not odfile['time'].isna().any(): + self.timesolts = list(map(lambda x: 
x.replace('T', ' ').replace('Z', ''), self.timesolts)) + self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]') + for idx, _ts in enumerate(self.timesolts): + self.idx_of_timesolts[_ts] = idx + + feature_dim = len(odfile.columns) - 3 + df = odfile[odfile.columns[-feature_dim:]] + len_time = len(self.timesolts) + data = np.zeros((self.num_nodes, self.num_nodes, len_time, feature_dim)) + for i in range(self.num_nodes): + origin_index = i * len_time * self.num_nodes + for j in range(self.num_nodes): + destination_index = j * len_time + index = origin_index + destination_index + data[i][j] = df[index:index + len_time].values + data = data.transpose((2, 0, 1, 3)) + self._logger.info("Loaded file " + filename + '.od' + ', shape=' + str(data.shape)) + return data + + def _load_grid_od_4d(self, filename): + self._logger.info("Loading file " + filename + '.gridod') + gridodfile = pd.read_csv(self.data_path + filename + '.gridod') + if self.data_col != '': + if isinstance(self.data_col, list): + data_col = self.data_col.copy() + else: + data_col = [self.data_col].copy() + data_col.insert(0, 'time') + data_col.insert(1, 'origin_row_id') + data_col.insert(2, 'origin_column_id') + data_col.insert(3, 'destination_row_id') + data_col.insert(4, 'destination_column_id') + gridodfile = gridodfile[data_col] + else: + gridodfile = gridodfile[gridodfile.columns[2:]] + self.timesolts = list(gridodfile['time'][:int(gridodfile.shape[0] / len(self.geo_ids) / len(self.geo_ids))]) + self.idx_of_timesolts = dict() + if not gridodfile['time'].isna().any(): + self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts)) + self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]') + for idx, _ts in enumerate(self.timesolts): + self.idx_of_timesolts[_ts] = idx + feature_dim = len(gridodfile.columns) - 5 + df = gridodfile[gridodfile.columns[-feature_dim:]] + len_time = len(self.timesolts) + data = np.zeros((len(self.geo_ids), len(self.geo_ids), len_time, feature_dim)) + for oi in range(self.len_row): + for oj in range(self.len_column): + origin_index = (oi * self.len_column + oj) * len_time * len(self.geo_ids) + for di in range(self.len_row): + for dj in range(self.len_column): + destination_index = (di * self.len_column + dj) * len_time + index = origin_index + destination_index + data[oi * self.len_column + oj][di * self.len_column + dj] = df[index:index + len_time].values + data = data.transpose((2, 0, 1, 3)) + self._logger.info("Loaded file " + filename + '.gridod' + ', shape=' + str(data.shape)) + return data + + def _load_grid_od_6d(self, filename): + self._logger.info("Loading file " + filename + '.gridod') + gridodfile = pd.read_csv(self.data_path + filename + '.gridod') + if self.data_col != '': + if isinstance(self.data_col, list): + data_col = self.data_col.copy() + else: + data_col = [self.data_col].copy() + data_col.insert(0, 'time') + data_col.insert(1, 'origin_row_id') + data_col.insert(2, 'origin_column_id') + data_col.insert(3, 'destination_row_id') + data_col.insert(4, 'destination_column_id') + gridodfile = gridodfile[data_col] + else: + gridodfile = gridodfile[gridodfile.columns[2:]] + self.timesolts = list(gridodfile['time'][:int(gridodfile.shape[0] / len(self.geo_ids) / len(self.geo_ids))]) + self.idx_of_timesolts = dict() + if not gridodfile['time'].isna().any(): + self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts)) + self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]') + for idx, _ts in 
enumerate(self.timesolts): + self.idx_of_timesolts[_ts] = idx + feature_dim = len(gridodfile.columns) - 5 + df = gridodfile[gridodfile.columns[-feature_dim:]] + len_time = len(self.timesolts) + data = np.zeros((self.len_row, self.len_column, self.len_row, self.len_column, len_time, feature_dim)) + for oi in range(self.len_row): + for oj in range(self.len_column): + origin_index = (oi * self.len_column + oj) * len_time * len(self.geo_ids) + for di in range(self.len_row): + for dj in range(self.len_column): + destination_index = (di * self.len_column + dj) * len_time + index = origin_index + destination_index + data[oi][oj][di][dj] = df[index:index + len_time].values + data = data.transpose((4, 0, 1, 2, 3, 5)) + self._logger.info("Loaded file " + filename + '.gridod' + ', shape=' + str(data.shape)) + return data + + def _load_ext(self): + extfile = pd.read_csv(self.data_path + self.ext_file + '.ext') + if self.ext_col != '': + if isinstance(self.ext_col, list): + ext_col = self.ext_col.copy() + else: + ext_col = [self.ext_col].copy() + ext_col.insert(0, 'time') + extfile = extfile[ext_col] + else: + extfile = extfile[extfile.columns[1:]] + self.ext_timesolts = extfile['time'] + self.idx_of_ext_timesolts = dict() + if not extfile['time'].isna().any(): + self.ext_timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.ext_timesolts)) + self.ext_timesolts = np.array(self.ext_timesolts, dtype='datetime64[ns]') + for idx, _ts in enumerate(self.ext_timesolts): + self.idx_of_ext_timesolts[_ts] = idx + feature_dim = len(extfile.columns) - 1 + df = extfile[extfile.columns[-feature_dim:]].values + self._logger.info("Loaded file " + self.ext_file + '.ext' + ', shape=' + str(df.shape)) + return df + + def _add_external_information(self, df, ext_data=None): + raise NotImplementedError('Please implement the function `_add_external_information()`.') + + def _add_external_information_3d(self, df, ext_data=None): + num_samples, num_nodes, feature_dim = df.shape + is_time_nan = np.isnan(self.timesolts).any() + data_list = [df] + if self.add_time_in_day and not is_time_nan: + time_ind = (self.timesolts - self.timesolts.astype("datetime64[D]")) / np.timedelta64(1, "D") + time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0)) + data_list.append(time_in_day) + if self.add_day_in_week and not is_time_nan: + dayofweek = [] + for day in self.timesolts.astype("datetime64[D]"): + dayofweek.append(datetime.datetime.strptime(str(day), '%Y-%m-%d').weekday()) + day_in_week = np.zeros(shape=(num_samples, num_nodes, 7)) + day_in_week[np.arange(num_samples), :, dayofweek] = 1 + data_list.append(day_in_week) + if ext_data is not None: + if not is_time_nan: + indexs = [] + for ts in self.timesolts: + ts_index = self.idx_of_ext_timesolts[ts] + indexs.append(ts_index) + select_data = ext_data[indexs] + for i in range(select_data.shape[1]): + data_ind = select_data[:, i] + data_ind = np.tile(data_ind, [1, num_nodes, 1]).transpose((2, 1, 0)) + data_list.append(data_ind) + else: + if ext_data.shape[0] == df.shape[0]: + select_data = ext_data + for i in range(select_data.shape[1]): + data_ind = select_data[:, i] + data_ind = np.tile(data_ind, [1, num_nodes, 1]).transpose((2, 1, 0)) + data_list.append(data_ind) + data = np.concatenate(data_list, axis=-1) + return data + + def _add_external_information_4d(self, df, ext_data=None): + num_samples, len_row, len_column, feature_dim = df.shape + is_time_nan = np.isnan(self.timesolts).any() + data_list = [df] + if self.add_time_in_day and not is_time_nan: 
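+ # time_in_day: fraction of the day elapsed at each timestamp (in [0, 1)), tiled over every grid cell and appended as an extra input channel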
+ time_ind = (self.timesolts - self.timesolts.astype("datetime64[D]")) / np.timedelta64(1, "D") + time_in_day = np.tile(time_ind, [1, len_row, len_column, 1]).transpose((3, 1, 2, 0)) + data_list.append(time_in_day) + if self.add_day_in_week and not is_time_nan: + dayofweek = [] + for day in self.timesolts.astype("datetime64[D]"): + dayofweek.append(datetime.datetime.strptime(str(day), '%Y-%m-%d').weekday()) + day_in_week = np.zeros(shape=(num_samples, len_row, len_column, 7)) + day_in_week[np.arange(num_samples), :, :, dayofweek] = 1 + data_list.append(day_in_week) + if ext_data is not None: + if not is_time_nan: + indexs = [] + for ts in self.timesolts: + ts_index = self.idx_of_ext_timesolts[ts] + indexs.append(ts_index) + select_data = ext_data[indexs] + for i in range(select_data.shape[1]): + data_ind = select_data[:, i] + data_ind = np.tile(data_ind, [1, len_row, len_column, 1]).transpose((3, 1, 2, 0)) + data_list.append(data_ind) + else: + if ext_data.shape[0] == df.shape[0]: + select_data = ext_data + for i in range(select_data.shape[1]): + data_ind = select_data[:, i] + data_ind = np.tile(data_ind, [1, len_row, len_column, 1]).transpose((3, 1, 2, 0)) + data_list.append(data_ind) + data = np.concatenate(data_list, axis=-1) + return data + + def _add_external_information_6d(self, df, ext_data=None): + num_samples, len_row, len_column, _, _, feature_dim = df.shape + is_time_nan = np.isnan(self.timesolts).any() + data_list = [df] + if self.add_time_in_day and not is_time_nan: + time_ind = (self.timesolts - self.timesolts.astype("datetime64[D]")) / np.timedelta64(1, "D") + time_in_day = np.tile(time_ind, [1, len_row, len_column, len_row, len_column, 1]).\ + transpose((5, 1, 2, 3, 4, 0)) + data_list.append(time_in_day) + if self.add_day_in_week and not is_time_nan: + dayofweek = [] + for day in self.timesolts.astype("datetime64[D]"): + dayofweek.append(datetime.datetime.strptime(str(day), '%Y-%m-%d').weekday()) + day_in_week = np.zeros(shape=(num_samples, len_row, len_column, len_row, len_column, 7)) + day_in_week[np.arange(num_samples), :, :, :, :, dayofweek] = 1 + data_list.append(day_in_week) + if ext_data is not None: + if not is_time_nan: + indexs = [] + for ts in self.timesolts: + ts_index = self.idx_of_ext_timesolts[ts] + indexs.append(ts_index) + select_data = ext_data[indexs] + for i in range(select_data.shape[1]): + data_ind = select_data[:, i] + data_ind = np.tile(data_ind, [1, len_row, len_column, len_row, len_column, 1]). \ + transpose((5, 1, 2, 3, 4, 0)) + data_list.append(data_ind) + else: + if ext_data.shape[0] == df.shape[0]: + select_data = ext_data + for i in range(select_data.shape[1]): + data_ind = select_data[:, i] + data_ind = np.tile(data_ind, [1, len_row, len_column, len_row, len_column, 1]). \ + transpose((5, 1, 2, 3, 4, 0)) + data_list.append(data_ind) + data = np.concatenate(data_list, axis=-1) + return data + + def _generate_input_data(self, df): + num_samples = df.shape[0] + x_offsets = np.sort(np.concatenate((np.arange(-self.input_window + 1, 1, 1),))) + y_offsets = np.sort(np.arange(1, self.output_window + 1, 1)) + + x, y = [], [] + min_t = abs(min(x_offsets)) + max_t = abs(num_samples - abs(max(y_offsets))) + for t in range(min_t, max_t): + x_t = df[t + x_offsets, ...] + y_t = df[t + y_offsets, ...] 
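+ # sliding-window sampling: x_t covers the preceding input_window steps (offsets -input_window+1 .. 0) and y_t the following output_window steps (offsets 1 .. output_window)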
+ x.append(x_t) + y.append(y_t) + x = np.stack(x, axis=0) + y = np.stack(y, axis=0) + return x, y + + def _generate_data(self): + if isinstance(self.data_files, list): + data_files = self.data_files.copy() + else: + data_files = [self.data_files].copy() + if self.load_external and os.path.exists(self.data_path + self.ext_file + '.ext'): + ext_data = self._load_ext() + else: + ext_data = None + x_list, y_list = [], [] + for filename in data_files: + df = self._load_dyna(filename) + if self.load_external: + df = self._add_external_information(df, ext_data) + x, y = self._generate_input_data(df) + x_list.append(x) + y_list.append(y) + x = np.concatenate(x_list) + y = np.concatenate(y_list) + self._logger.info("Dataset created") + self._logger.info("x shape: " + str(x.shape) + ", y shape: " + str(y.shape)) + return x, y + + def _split_train_val_test(self, x, y): + test_rate = 1 - self.train_rate - self.eval_rate + num_samples = x.shape[0] + num_test = round(num_samples * test_rate) + num_train = round(num_samples * self.train_rate) + num_val = num_samples - num_test - num_train + + x_train, y_train = x[int(num_train*(1 - self.part_train_rate)):num_train], y[int(num_train*(1 - self.part_train_rate)):num_train] + x_val, y_val = x[num_train: num_train + num_val], y[num_train: num_train + num_val] + x_test, y_test = x[-num_test:], y[-num_test:] + self._logger.info("train\t" + "x: " + str(x_train.shape) + ", y: " + str(y_train.shape)) + self._logger.info("eval\t" + "x: " + str(x_val.shape) + ", y: " + str(y_val.shape)) + self._logger.info("test\t" + "x: " + str(x_test.shape) + ", y: " + str(y_test.shape)) + + if self.rank == 0 and self.cache_dataset: + ensure_dir(self.cache_file_folder) + np.savez_compressed( + self.cache_file_name, + x_train=x_train, + y_train=y_train, + x_test=x_test, + y_test=y_test, + x_val=x_val, + y_val=y_val, + ) + self._logger.info('Saved at ' + self.cache_file_name) + return x_train, y_train, x_val, y_val, x_test, y_test + + def _generate_train_val_test(self): + x, y = self._generate_data() + return self._split_train_val_test(x, y) + + def _load_cache_train_val_test(self): + self._logger.info('Loading ' + self.cache_file_name) + cat_data = np.load(self.cache_file_name) + x_train = cat_data['x_train'] + y_train = cat_data['y_train'] + x_test = cat_data['x_test'] + y_test = cat_data['y_test'] + x_val = cat_data['x_val'] + y_val = cat_data['y_val'] + self._logger.info("train\t" + "x: " + str(x_train.shape) + ", y: " + str(y_train.shape)) + self._logger.info("eval\t" + "x: " + str(x_val.shape) + ", y: " + str(y_val.shape)) + self._logger.info("test\t" + "x: " + str(x_test.shape) + ", y: " + str(y_test.shape)) + return x_train, y_train, x_val, y_val, x_test, y_test + + def _get_scalar(self, scaler_type, x_train, y_train): + if scaler_type == "normal": + scaler = NormalScaler(maxx=max(x_train.max(), y_train.max())) + self._logger.info('NormalScaler max: ' + str(scaler.max)) + elif scaler_type == "standard": + scaler = StandardScaler(mean=x_train.mean(), std=x_train.std()) + self._logger.info('StandardScaler mean: ' + str(scaler.mean) + ', std: ' + str(scaler.std)) + elif scaler_type == "minmax01": + scaler = MinMax01Scaler( + maxx=max(x_train.max(), y_train.max()), minn=min(x_train.min(), y_train.min())) + self._logger.info('MinMax01Scaler max: ' + str(scaler.max) + ', min: ' + str(scaler.min)) + elif scaler_type == "minmax11": + scaler = MinMax11Scaler( + maxx=max(x_train.max(), y_train.max()), minn=min(x_train.min(), y_train.min())) + self._logger.info('MinMax11Scaler max: ' + 
str(scaler.max) + ', min: ' + str(scaler.min)) + elif scaler_type == "log": + scaler = LogScaler() + self._logger.info('LogScaler') + elif scaler_type == "none": + scaler = NoneScaler() + self._logger.info('NoneScaler') + else: + raise ValueError('Scaler type error!') + return scaler + + def get_data(self): + x_train, y_train, x_val, y_val, x_test, y_test = [], [], [], [], [], [] + if self.data is None: + self.data = {} + if self.cache_dataset and os.path.exists(self.cache_file_name): + x_train, y_train, x_val, y_val, x_test, y_test = self._load_cache_train_val_test() + else: + x_train, y_train, x_val, y_val, x_test, y_test = self._generate_train_val_test() + self.feature_dim = x_train.shape[-1] + self.ext_dim = self.feature_dim - self.output_dim + self.scaler = self._get_scalar(self.scaler_type, + x_train[..., :self.output_dim], y_train[..., :self.output_dim]) + self.ext_scaler = self._get_scalar(self.ext_scaler_type, + x_train[..., self.output_dim:], y_train[..., self.output_dim:]) + x_train[..., :self.output_dim] = self.scaler.transform(x_train[..., :self.output_dim]) + y_train[..., :self.output_dim] = self.scaler.transform(y_train[..., :self.output_dim]) + x_val[..., :self.output_dim] = self.scaler.transform(x_val[..., :self.output_dim]) + y_val[..., :self.output_dim] = self.scaler.transform(y_val[..., :self.output_dim]) + x_test[..., :self.output_dim] = self.scaler.transform(x_test[..., :self.output_dim]) + y_test[..., :self.output_dim] = self.scaler.transform(y_test[..., :self.output_dim]) + if self.normal_external: + x_train[..., self.output_dim:] = self.ext_scaler.transform(x_train[..., self.output_dim:]) + y_train[..., self.output_dim:] = self.ext_scaler.transform(y_train[..., self.output_dim:]) + x_val[..., self.output_dim:] = self.ext_scaler.transform(x_val[..., self.output_dim:]) + y_val[..., self.output_dim:] = self.ext_scaler.transform(y_val[..., self.output_dim:]) + x_test[..., self.output_dim:] = self.ext_scaler.transform(x_test[..., self.output_dim:]) + y_test[..., self.output_dim:] = self.ext_scaler.transform(y_test[..., self.output_dim:]) + train_data = list(zip(x_train, y_train)) + eval_data = list(zip(x_val, y_val)) + test_data = list(zip(x_test, y_test)) + self.train_dataloader, self.eval_dataloader, self.test_dataloader = \ + generate_dataloader(train_data, eval_data, test_data, self.feature_name, + self.batch_size, self.num_workers, pad_with_last_sample=self.pad_with_last_sample, + distributed=self.distributed) + self.num_batches = len(self.train_dataloader) + return self.train_dataloader, self.eval_dataloader, self.test_dataloader + + def get_data_feature(self): + raise NotImplementedError('Please implement the function `get_data_feature()`.') diff --git a/libcity/data/dataset/traffic_state_grid_dataset.py b/libcity/data/dataset/traffic_state_grid_dataset.py new file mode 100644 index 0000000..34faccf --- /dev/null +++ b/libcity/data/dataset/traffic_state_grid_dataset.py @@ -0,0 +1,41 @@ +import os + +from libcity.data.dataset import TrafficStateDataset + + +class TrafficStateGridDataset(TrafficStateDataset): + + def __init__(self, config): + super().__init__(config) + self.use_row_column = self.config.get('use_row_column', True) + self.parameters_str = self.parameters_str + '_' + str(self.use_row_column) + self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/', + 'grid_based_{}.npz'.format(self.parameters_str)) + self._load_rel() + + def _load_geo(self): + super()._load_grid_geo() + + def _load_rel(self): + if os.path.exists(self.data_path + 
self.rel_file + '.rel'): + super()._load_rel() + else: + super()._load_grid_rel() + + def _load_dyna(self, filename): + if self.use_row_column: + return super()._load_grid_4d(filename) + else: + return super()._load_grid_3d(filename) + + def _add_external_information(self, df, ext_data=None): + if self.use_row_column: + return super()._add_external_information_4d(df, ext_data) + else: + return super()._add_external_information_3d(df, ext_data) + + def get_data_feature(self): + return {"scaler": self.scaler, "adj_mx": self.adj_mx, + "num_nodes": self.num_nodes, "feature_dim": self.feature_dim, "ext_dim": self.ext_dim, + "output_dim": self.output_dim, "len_row": self.len_row, "len_column": self.len_column, + "num_batches": self.num_batches} diff --git a/libcity/data/dataset/traffic_state_point_dataset.py b/libcity/data/dataset/traffic_state_point_dataset.py new file mode 100644 index 0000000..6c98890 --- /dev/null +++ b/libcity/data/dataset/traffic_state_point_dataset.py @@ -0,0 +1,28 @@ +import os + +from libcity.data.dataset import TrafficStateDataset + + +class TrafficStatePointDataset(TrafficStateDataset): + + def __init__(self, config): + super().__init__(config) + self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/', + 'point_based_{}.npz'.format(self.parameters_str)) + + def _load_geo(self): + super()._load_geo() + + def _load_rel(self): + super()._load_rel() + + def _load_dyna(self, filename): + return super()._load_dyna_3d(filename) + + def _add_external_information(self, df, ext_data=None): + return super()._add_external_information_3d(df, ext_data) + + def get_data_feature(self): + return {"scaler": self.scaler, "adj_mx": self.adj_mx, "ext_dim": self.ext_dim, + "num_nodes": self.num_nodes, "feature_dim": self.feature_dim, + "output_dim": self.output_dim, "num_batches": self.num_batches} diff --git a/libcity/data/list_dataset.py b/libcity/data/list_dataset.py new file mode 100644 index 0000000..c6577cf --- /dev/null +++ b/libcity/data/list_dataset.py @@ -0,0 +1,12 @@ +from torch.utils.data import Dataset + + +class ListDataset(Dataset): + def __init__(self, data): + self.data = data + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return len(self.data) diff --git a/libcity/data/utils.py b/libcity/data/utils.py new file mode 100644 index 0000000..ce9cbc4 --- /dev/null +++ b/libcity/data/utils.py @@ -0,0 +1,57 @@ +import importlib +import numpy as np +from torch.utils.data import DataLoader +from torch.utils.data.distributed import DistributedSampler +import copy + +from libcity.data.list_dataset import ListDataset +from libcity.data.batch import Batch + + +def get_dataset(config): + try: + return getattr(importlib.import_module('libcity.data.dataset'), + config['dataset_class'])(config) + except AttributeError: + raise AttributeError('dataset_class is not found') + + +def generate_dataloader(train_data, eval_data, test_data, feature_name, + batch_size, num_workers, pad_item=None, + pad_max_len=None, shuffle=True, + pad_with_last_sample=False, distributed=False): + if pad_with_last_sample: + num_padding = (batch_size - (len(train_data) % batch_size)) % batch_size + data_padding = np.repeat(train_data[-1:], num_padding, axis=0) + train_data = np.concatenate([train_data, data_padding], axis=0) + num_padding = (batch_size - (len(eval_data) % batch_size)) % batch_size + data_padding = np.repeat(eval_data[-1:], num_padding, axis=0) + eval_data = np.concatenate([eval_data, data_padding], axis=0) + num_padding = (batch_size - 
(len(test_data) % batch_size)) % batch_size + data_padding = np.repeat(test_data[-1:], num_padding, axis=0) + test_data = np.concatenate([test_data, data_padding], axis=0) + train_dataset = ListDataset(train_data) + eval_dataset = ListDataset(eval_data) + test_dataset = ListDataset(test_data) + train_sampler = None + eval_sampler = None + if distributed: + train_sampler = DistributedSampler(train_dataset) + eval_sampler = DistributedSampler(eval_dataset) + + def collator(indices): + batch = Batch(feature_name, pad_item, pad_max_len) + for item in indices: + batch.append(copy.deepcopy(item)) + batch.padding() + return batch + train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, + num_workers=num_workers, collate_fn=collator, + shuffle=shuffle and train_sampler is None, sampler=train_sampler) + eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=batch_size, + num_workers=num_workers, collate_fn=collator, + shuffle=shuffle and eval_sampler is None, sampler=eval_sampler) + test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, + num_workers=num_workers, collate_fn=collator, + shuffle=False) + return train_dataloader, eval_dataloader, test_dataloader diff --git a/libcity/evaluator/__init__.py b/libcity/evaluator/__init__.py new file mode 100644 index 0000000..5d68737 --- /dev/null +++ b/libcity/evaluator/__init__.py @@ -0,0 +1,7 @@ +from libcity.evaluator.traffic_state_evaluator import TrafficStateEvaluator +from libcity.evaluator.traffic_state_grid_evaluator import TrafficStateGridEvaluator + +__all__ = [ + "TrafficStateEvaluator", + "TrafficStateGridEvaluator", +] diff --git a/libcity/evaluator/abstract_evaluator.py b/libcity/evaluator/abstract_evaluator.py new file mode 100644 index 0000000..772f466 --- /dev/null +++ b/libcity/evaluator/abstract_evaluator.py @@ -0,0 +1,16 @@ +class AbstractEvaluator(object): + + def __init__(self, config): + raise NotImplementedError('evaluator not implemented') + + def collect(self, batch): + raise NotImplementedError('evaluator collect not implemented') + + def evaluate(self): + raise NotImplementedError('evaluator evaluate not implemented') + + def save_result(self, save_path, filename=None): + raise NotImplementedError('evaluator save_result not implemented') + + def clear(self): + raise NotImplementedError('evaluator clear not implemented') diff --git a/libcity/evaluator/eval_funcs.py b/libcity/evaluator/eval_funcs.py new file mode 100644 index 0000000..cba8ecf --- /dev/null +++ b/libcity/evaluator/eval_funcs.py @@ -0,0 +1,62 @@ +import numpy as np +import torch + + +def mse(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), 'MSE' + return np.mean((loc_pred - loc_true) ** 2) + + +def mae(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), 'MAE' + return np.mean(np.abs(loc_pred - loc_true)) + + +def rmse(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), 'RMSE' + return np.sqrt(np.mean((loc_pred - loc_true) ** 2)) + + +def mape(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), 'MAPE' + assert 0 not in loc_true, "MAPE:" + return np.mean(abs(loc_pred - loc_true) / loc_true) + + +def mare(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), "MARE" + assert np.sum(loc_true) != 0, "MARE" + return np.sum(np.abs(loc_pred - loc_true)) / np.sum(loc_true) + + +def smape(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), 'SMAPE' + assert 0 not in (loc_pred + loc_true), "SMAPE" + return 2.0 * np.mean(np.abs(loc_pred - loc_true) / (np.abs(loc_pred) 
+ + np.abs(loc_true))) + + +def acc(loc_pred, loc_true): + assert len(loc_pred) == len(loc_true), "accuracy" + loc_diff = loc_pred - loc_true + loc_diff[loc_diff != 0] = 1 + return loc_diff, np.mean(loc_diff == 0) + + +def top_k(loc_pred, loc_true, topk): + assert topk > 0, "top-k ACC" + loc_pred = torch.FloatTensor(loc_pred) + val, index = torch.topk(loc_pred, topk, 1) + index = index.numpy() + hit = 0 + rank = 0.0 + dcg = 0.0 + for i, p in enumerate(index): + target = loc_true[i] + if target in p: + hit += 1 + rank_list = list(p) + rank_index = rank_list.index(target) + rank += 1.0 / (rank_index + 1) + dcg += 1.0 / np.log2(rank_index + 2) + return hit, rank, dcg diff --git a/libcity/evaluator/traffic_state_evaluator.py b/libcity/evaluator/traffic_state_evaluator.py new file mode 100644 index 0000000..0470689 --- /dev/null +++ b/libcity/evaluator/traffic_state_evaluator.py @@ -0,0 +1,152 @@ +import os +import json +import datetime +import pandas as pd +from libcity.utils import ensure_dir +from libcity.model import loss +from logging import getLogger +from libcity.evaluator.abstract_evaluator import AbstractEvaluator + + +class TrafficStateEvaluator(AbstractEvaluator): + + def __init__(self, config): + self.metrics = config.get('metrics', ['MAE']) + self.allowed_metrics = ['MAE', 'MSE', 'RMSE', 'MAPE', 'masked_MAE', + 'masked_MSE', 'masked_RMSE', 'masked_MAPE', 'R2', 'EVAR'] + self.save_modes = config.get('save_modes', ['csv', 'json']) + self.mode = config.get('mode', 'single') + self.config = config + self.len_timeslots = 0 + self.result = {} + self.intermediate_result = {} + self._check_config() + self._logger = getLogger() + + def _check_config(self): + if not isinstance(self.metrics, list): + raise TypeError('Evaluator type is not list') + for metric in self.metrics: + if metric not in self.allowed_metrics: + raise ValueError('the metric {} is not allowed in TrafficStateEvaluator'.format(str(metric))) + + def collect(self, batch): + if not isinstance(batch, dict): + raise TypeError('evaluator.collect input is not a dict of user') + y_true = batch['y_true'] + y_pred = batch['y_pred'] + if y_true.shape != y_pred.shape: + raise ValueError("batch['y_true'].shape is not equal to batch['y_pred'].shape") + self.len_timeslots = y_true.shape[1] + for i in range(1, self.len_timeslots+1): + for metric in self.metrics: + if metric+'@'+str(i) not in self.intermediate_result: + self.intermediate_result[metric+'@'+str(i)] = [] + if self.mode.lower() == 'average': + for i in range(1, self.len_timeslots+1): + for metric in self.metrics: + if metric == 'masked_MAE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, :i], y_true[:, :i], 0).item()) + elif metric == 'masked_MSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, :i], y_true[:, :i], 0).item()) + elif metric == 'masked_RMSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, :i], y_true[:, :i], 0).item()) + elif metric == 'masked_MAPE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, :i], y_true[:, :i], 0).item()) + elif metric == 'MAE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, :i], y_true[:, :i]).item()) + elif metric == 'MSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, :i], y_true[:, :i]).item()) + elif metric == 'RMSE': + self.intermediate_result[metric + '@' + 
str(i)].append( + loss.masked_rmse_torch(y_pred[:, :i], y_true[:, :i]).item()) + elif metric == 'MAPE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, :i], y_true[:, :i]).item()) + elif metric == 'R2': + self.intermediate_result[metric + '@' + str(i)].append( + loss.r2_score_torch(y_pred[:, :i], y_true[:, :i]).item()) + elif metric == 'EVAR': + self.intermediate_result[metric + '@' + str(i)].append( + loss.explained_variance_score_torch(y_pred[:, :i], y_true[:, :i]).item()) + elif self.mode.lower() == 'single': + for i in range(1, self.len_timeslots + 1): + for metric in self.metrics: + if metric == 'masked_MAE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, i-1], y_true[:, i-1], 0).item()) + elif metric == 'masked_MSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, i-1], y_true[:, i-1], 0).item()) + elif metric == 'masked_RMSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, i-1], y_true[:, i-1], 0).item()) + elif metric == 'masked_MAPE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, i-1], y_true[:, i-1], 0).item()) + elif metric == 'MAE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, i-1], y_true[:, i-1]).item()) + elif metric == 'MSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, i-1], y_true[:, i-1]).item()) + elif metric == 'RMSE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, i-1], y_true[:, i-1]).item()) + elif metric == 'MAPE': + self.intermediate_result[metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, i-1], y_true[:, i-1]).item()) + elif metric == 'R2': + self.intermediate_result[metric + '@' + str(i)].append( + loss.r2_score_torch(y_pred[:, i-1], y_true[:, i-1]).item()) + elif metric == 'EVAR': + self.intermediate_result[metric + '@' + str(i)].append( + loss.explained_variance_score_torch(y_pred[:, i-1], y_true[:, i-1]).item()) + else: + raise ValueError('Error parameter evaluator_mode={}, please set `single` or `average`.'.format(self.mode)) + + def evaluate(self): + for i in range(1, self.len_timeslots + 1): + for metric in self.metrics: + self.result[metric+'@'+str(i)] = sum(self.intermediate_result[metric+'@'+str(i)]) / \ + len(self.intermediate_result[metric+'@'+str(i)]) + return self.result + + def save_result(self, save_path, filename=None): + self._logger.info('Note that you select the {} mode to evaluate!'.format(self.mode)) + self.evaluate() + ensure_dir(save_path) + if filename is None: + filename = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '_' + \ + self.config['model'] + '_' + self.config['dataset'] + '_' + self.mode + + if 'json' in self.save_modes: + self._logger.info('Evaluate result is ' + json.dumps(self.result)) + with open(os.path.join(save_path, '{}.json'.format(filename)), 'w') as f: + json.dump(self.result, f) + self._logger.info('Evaluate result is saved at ' + + os.path.join(save_path, '{}.json'.format(filename))) + + dataframe = {} + if 'csv' in self.save_modes: + for metric in self.metrics: + dataframe[metric] = [] + for i in range(1, self.len_timeslots + 1): + for metric in self.metrics: + dataframe[metric].append(self.result[metric+'@'+str(i)]) + dataframe = pd.DataFrame(dataframe, index=range(1, self.len_timeslots + 1)) + 
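+ # one row per prediction horizon step i; in 'average' mode metric@i aggregates steps 1..i, in 'single' mode it reflects step i only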
dataframe.to_csv(os.path.join(save_path, '{}.csv'.format(filename)), index=False) + self._logger.info('Evaluate result is saved at ' + + os.path.join(save_path, '{}.csv'.format(filename))) + self._logger.info("\n" + str(dataframe)) + return dataframe + + def clear(self): + self.result = {} + self.intermediate_result = {} diff --git a/libcity/evaluator/traffic_state_grid_evaluator.py b/libcity/evaluator/traffic_state_grid_evaluator.py new file mode 100644 index 0000000..a38bf3e --- /dev/null +++ b/libcity/evaluator/traffic_state_grid_evaluator.py @@ -0,0 +1,137 @@ +import os +import json +import datetime +import pandas as pd +from libcity.utils import ensure_dir +from libcity.model import loss +from libcity.evaluator.traffic_state_evaluator import TrafficStateEvaluator + + +class TrafficStateGridEvaluator(TrafficStateEvaluator): + + def __init__(self, config): + super().__init__(config) + self.output_dim = self.config.get('output_dim', 1) + self.mask_val = self.config.get('mask_val', 10) + + def collect(self, batch): + if not isinstance(batch, dict): + raise TypeError('evaluator.collect input is not a dict of user') + y_true = batch['y_true'] + y_pred = batch['y_pred'] + if y_true.shape != y_pred.shape: + raise ValueError("batch['y_true'].shape is not equal to batch['y_pred'].shape") + self.len_timeslots = y_true.shape[1] + for j in range(self.output_dim): + for i in range(1, self.len_timeslots+1): + for metric in self.metrics: + if str(j)+'-'+metric+'@'+str(i) not in self.intermediate_result: + self.intermediate_result[str(j)+'-'+metric+'@'+str(i)] = [] + if self.mode.lower() == 'average': + for j in range(self.output_dim): + for i in range(1, self.len_timeslots+1): + for metric in self.metrics: + if metric == 'masked_MAE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j], 0, self.mask_val).item()) + elif metric == 'masked_MSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j], 0, self.mask_val).item()) + elif metric == 'masked_RMSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j], 0, self.mask_val).item()) + elif metric == 'masked_MAPE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j], 0, self.mask_val).item()) + elif metric == 'MAE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j]).item()) + elif metric == 'MSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j]).item()) + elif metric == 'RMSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j]).item()) + elif metric == 'MAPE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j]).item()) + elif metric == 'R2': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.r2_score_torch(y_pred[:, :i, ..., j], y_true[:, :i, ..., j]).item()) + elif metric == 'EVAR': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.explained_variance_score_torch(y_pred[:, :i, 
..., j], y_true[:, :i, ..., j]).item()) + elif self.mode.lower() == 'single': + for j in range(self.output_dim): + for i in range(1, self.len_timeslots+1): + for metric in self.metrics: + if metric == 'masked_MAE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j], 0, self.mask_val).item()) + elif metric == 'masked_MSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j], 0, self.mask_val).item()) + elif metric == 'masked_RMSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j], 0, self.mask_val).item()) + elif metric == 'masked_MAPE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j], 0, self.mask_val).item()) + elif metric == 'MAE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mae_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j]).item()) + elif metric == 'MSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mse_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j]).item()) + elif metric == 'RMSE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_rmse_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j]).item()) + elif metric == 'MAPE': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.masked_mape_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j]).item()) + elif metric == 'R2': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.r2_score_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j]).item()) + elif metric == 'EVAR': + self.intermediate_result[str(j) + '-' + metric + '@' + str(i)].append( + loss.explained_variance_score_torch(y_pred[:, i-1, ..., j], y_true[:, i-1, ..., j]).item()) + else: + raise ValueError('Error parameter evaluator_mode={}, please set `single` or `average`.'.format(self.mode)) + + def evaluate(self): + for j in range(self.output_dim): + for i in range(1, self.len_timeslots + 1): + for metric in self.metrics: + self.result[str(j)+'-'+metric+'@'+str(i)] = sum(self.intermediate_result[str(j)+'-'+metric+'@'+str(i)]) / \ + len(self.intermediate_result[str(j)+'-'+metric+'@'+str(i)]) + return self.result + + def save_result(self, save_path, filename=None): + self._logger.info('Note that you select the {} mode to evaluate!'.format(self.mode)) + self.evaluate() + ensure_dir(save_path) + if filename is None: + filename = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '_' + \ + self.config['model'] + '_' + self.config['dataset'] + + if 'json' in self.save_modes: + self._logger.info('Evaluate result is ' + json.dumps(self.result)) + with open(os.path.join(save_path, '{}.json'.format(filename)), 'w') as f: + json.dump(self.result, f) + self._logger.info('Evaluate result is saved at ' + + os.path.join(save_path, '{}.json'.format(filename))) + + dataframe = {} + if 'csv' in self.save_modes: + for j in range(self.output_dim): + for metric in self.metrics: + dataframe[str(j)+"-"+metric] = [] + for i in range(1, self.len_timeslots + 1): + for metric in self.metrics: + dataframe[str(j)+"-"+metric].append(self.result[str(j)+'-'+metric+'@'+str(i)]) + dataframe = pd.DataFrame(dataframe, index=range(1, self.len_timeslots + 1)) 
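+ # columns are named '<output feature index>-<metric>'; the masked_* variants pass 0 and mask_val so that low-flow targets are excluded (the exact masking rule is defined in libcity.model.loss)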
+ dataframe.to_csv(os.path.join(save_path, '{}.csv'.format(filename)), index=False) + self._logger.info('Evaluate result is saved at ' + + os.path.join(save_path, '{}.csv'.format(filename))) + self._logger.info("\n" + str(dataframe)) + return dataframe diff --git a/libcity/evaluator/utils.py b/libcity/evaluator/utils.py new file mode 100644 index 0000000..5f9e0bc --- /dev/null +++ b/libcity/evaluator/utils.py @@ -0,0 +1,119 @@ +import json +from heapq import nlargest +import pandas as pd +from libcity.model.loss import * + + +def output(method, value, field): + if method == 'ACC': + if field == 'model': + print('---- {} avg_acc={:.3f} ----'.format(method, + value)) + else: + print('{} avg_acc={:.3f}'.format(method, value)) + elif method in ['MSE', 'RMSE', 'MAE', 'MAPE', 'MARE', 'SMAPE']: + if field == 'model': + print('---- {} avg_loss={:.3f} ----'.format(method, + value)) + else: + print('{} avg_loss={:.3f}'.format(method, value)) + else: + if field == 'model': + print('---- {} avg_acc={:.3f} ----'.format(method, + value)) + else: + print('{} avg_acc={:.3f}'.format(method, value)) + + +def transfer_data(data, model, maxk): + if type(data) == str: + data = json.loads(data) + assert type(data) == dict, "invalid" + if model == 'DeepMove': + user_idx = data.keys() + for user_id in user_idx: + trace_idx = data[user_id].keys() + for trace_id in trace_idx: + trace = data[user_id][trace_id] + loc_pred = trace['loc_pred'] + new_loc_pred = [] + for t_list in loc_pred: + new_loc_pred.append(sort_confidence_ids(t_list, maxk)) + data[user_id][trace_id]['loc_pred'] = new_loc_pred + return data + + +def sort_confidence_ids(confidence_list, threshold): + max_score_with_id = nlargest( + threshold, enumerate(confidence_list), lambda x: x[1]) + return list(map(lambda x: x[0], max_score_with_id)) + + +def evaluate_model(y_pred, y_true, metrics, mode='single', path='metrics.csv'): + if y_true.shape != y_pred.shape: + raise ValueError("y_true.shape is not equal to y_pred.shape") + len_timeslots = y_true.shape[1] + if isinstance(y_pred, np.ndarray): + y_pred = torch.FloatTensor(y_pred) + if isinstance(y_true, np.ndarray): + y_true = torch.FloatTensor(y_true) + assert isinstance(y_pred, torch.Tensor) + assert isinstance(y_true, torch.Tensor) + + df = [] + for i in range(1, len_timeslots + 1): + line = {} + for metric in metrics: + if mode.lower() == 'single': + if metric == 'masked_MAE': + line[metric] = masked_mae_torch(y_pred[:, i - 1], y_true[:, i - 1], 0).item() + elif metric == 'masked_MSE': + line[metric] = masked_mse_torch(y_pred[:, i - 1], y_true[:, i - 1], 0).item() + elif metric == 'masked_RMSE': + line[metric] = masked_rmse_torch(y_pred[:, i - 1], y_true[:, i - 1], 0).item() + elif metric == 'masked_MAPE': + line[metric] = masked_mape_torch(y_pred[:, i - 1], y_true[:, i - 1], 0).item() + elif metric == 'MAE': + line[metric] = masked_mae_torch(y_pred[:, i - 1], y_true[:, i - 1]).item() + elif metric == 'MSE': + line[metric] = masked_mse_torch(y_pred[:, i - 1], y_true[:, i - 1]).item() + elif metric == 'RMSE': + line[metric] = masked_rmse_torch(y_pred[:, i - 1], y_true[:, i - 1]).item() + elif metric == 'MAPE': + line[metric] = masked_mape_torch(y_pred[:, i - 1], y_true[:, i - 1]).item() + elif metric == 'R2': + line[metric] = r2_score_torch(y_pred[:, i - 1], y_true[:, i - 1]).item() + elif metric == 'EVAR': + line[metric] = explained_variance_score_torch(y_pred[:, i - 1], y_true[:, i - 1]).item() + else: + raise ValueError('Error parameter mode={}, please set `single` or `average`.'.format(mode)) + 
elif mode.lower() == 'average': + if metric == 'masked_MAE': + line[metric] = masked_mae_torch(y_pred[:, :i], y_true[:, :i], 0).item() + elif metric == 'masked_MSE': + line[metric] = masked_mse_torch(y_pred[:, :i], y_true[:, :i], 0).item() + elif metric == 'masked_RMSE': + line[metric] = masked_rmse_torch(y_pred[:, :i], y_true[:, :i], 0).item() + elif metric == 'masked_MAPE': + line[metric] = masked_mape_torch(y_pred[:, :i], y_true[:, :i], 0).item() + elif metric == 'MAE': + line[metric] = masked_mae_torch(y_pred[:, :i], y_true[:, :i]).item() + elif metric == 'MSE': + line[metric] = masked_mse_torch(y_pred[:, :i], y_true[:, :i]).item() + elif metric == 'RMSE': + line[metric] = masked_rmse_torch(y_pred[:, :i], y_true[:, :i]).item() + elif metric == 'MAPE': + line[metric] = masked_mape_torch(y_pred[:, :i], y_true[:, :i]).item() + elif metric == 'R2': + line[metric] = r2_score_torch(y_pred[:, :i], y_true[:, :i]).item() + elif metric == 'EVAR': + line[metric] = explained_variance_score_torch(y_pred[:, :i], y_true[:, :i]).item() + else: + raise ValueError('Error parameter metric={}!'.format(metric)) + else: + raise ValueError('Error parameter evaluator_mode={}, please set `single` or `average`.'.format(mode)) + df.append(line) + df = pd.DataFrame(df, columns=metrics) + print(df) + df.to_csv(path) + return df diff --git a/libcity/executor/__init__.py b/libcity/executor/__init__.py new file mode 100644 index 0000000..73518b8 --- /dev/null +++ b/libcity/executor/__init__.py @@ -0,0 +1,8 @@ +from libcity.executor.traffic_state_executor import TrafficStateExecutor +from libcity.executor.pdformer_executor import PDFormerExecutor + + +__all__ = [ + "TrafficStateExecutor", + "PDFormerExecutor", +] diff --git a/libcity/executor/abstract_executor.py b/libcity/executor/abstract_executor.py new file mode 100644 index 0000000..304f6ca --- /dev/null +++ b/libcity/executor/abstract_executor.py @@ -0,0 +1,16 @@ +class AbstractExecutor(object): + + def __init__(self, config, model): + raise NotImplementedError("Executor not implemented") + + def train(self, train_dataloader, eval_dataloader): + raise NotImplementedError("Executor train not implemented") + + def evaluate(self, test_dataloader): + raise NotImplementedError("Executor evaluate not implemented") + + def load_model(self, cache_name): + raise NotImplementedError("Executor load cache not implemented") + + def save_model(self, cache_name): + raise NotImplementedError("Executor save cache not implemented") diff --git a/libcity/executor/pdformer_executor.py b/libcity/executor/pdformer_executor.py new file mode 100644 index 0000000..a12ae28 --- /dev/null +++ b/libcity/executor/pdformer_executor.py @@ -0,0 +1,299 @@ +import time +import numpy as np +import torch +import os +from libcity.executor.scheduler import CosineLRScheduler +from ray import tune +from libcity.executor.traffic_state_executor import TrafficStateExecutor +import scipy.sparse as sp +from libcity.utils import reduce_array +from tqdm import tqdm + + +class PDFormerExecutor(TrafficStateExecutor): + + def __init__(self, config, model): + self.no_load = config.get('no_load', []) + self.lr_warmup_epoch = config.get("lr_warmup_epoch", 5) + self.lr_warmup_init = config.get("lr_warmup_init", 1e-6) + self.lape_dim = config.get('lape_dim', 200) + self.adj_mx = model.get_data_feature().get('adj_mx') + super().__init__(config, model) + self.lap_mx = self._cal_lape(self.adj_mx).to(self.device) + self.random_flip = config.get('random_flip', True) + self.set_loss = config.get('set_loss', 'masked_mae') + 
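+ # lap_mx is the graph's Laplacian-eigenvector positional encoding: the lape_dim non-trivial eigenvectors of the normalized Laplacian computed in _cal_lape (isolated nodes are skipped); random_flip randomly flips eigenvector signs per training batch to handle the sign ambiguity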
+ def check_noload(self, k): + for no_load_para in self.no_load: + if no_load_para in k: + return True + return False + + def load_model_with_initial_ckpt(self, initial_ckpt): + assert os.path.exists(initial_ckpt), 'Weights at %s not found' % initial_ckpt + model_state, optimizer_state = torch.load(initial_ckpt, map_location=torch.device('cpu')) + model_keys = self.model.state_dict() + state_dict_load = {} + unexpect_keys = [] + for k, v in model_state.items(): + if k not in model_keys.keys() or v.shape != model_keys[k].shape or self.check_noload(k): + unexpect_keys.append(k) + else: + state_dict_load[k] = v + for k, v in model_keys.items(): + if k not in model_state.keys(): + unexpect_keys.append(k) + self._logger.info("unexpected keys: {}".format(unexpect_keys)) + self.model.load_state_dict(state_dict_load, strict=False) + self._logger.info("Initialize model from {}".format(initial_ckpt)) + + def _calculate_normalized_laplacian(self, adj): + adj = sp.coo_matrix(adj) + d = np.array(adj.sum(1)) + isolated_point_num = np.sum(np.where(d, 0, 1)) + self._logger.info(f"Number of isolated points: {isolated_point_num}") + d_inv_sqrt = np.power(d, -0.5).flatten() + d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. + d_mat_inv_sqrt = sp.diags(d_inv_sqrt) + normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() + return normalized_laplacian, isolated_point_num + + def _calculate_random_walk_laplacian(self, adj): + adj = sp.coo_matrix(adj) + d = np.array(adj.sum(1)) + isolated_point_num = np.sum(np.where(d, 0, 1)) + d_inv = np.power(d, -1).flatten() + d_inv[np.isinf(d_inv)] = 0. + d_mat_inv = sp.diags(d_inv) + random_walk_mx = sp.eye(adj.shape[0]) - d_mat_inv.dot(adj).tocoo() + return random_walk_mx, isolated_point_num + + def _cal_lape(self, adj_mx): + L, isolated_point_num = self._calculate_normalized_laplacian(adj_mx) + EigVal, EigVec = np.linalg.eig(L.toarray()) + idx = EigVal.argsort() + EigVal, EigVec = EigVal[idx], np.real(EigVec[:, idx]) + + laplacian_pe = torch.from_numpy(EigVec[:, isolated_point_num + 1: self.lape_dim + isolated_point_num + 1]).float() + laplacian_pe.require_grad = False + return laplacian_pe + + def _build_optimizer(self): + self._logger.info('You select `{}` optimizer.'.format(self.learner.lower())) + if self.learner.lower() == 'adam': + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, betas=self.lr_betas, weight_decay=self.weight_decay) + elif self.learner.lower() == 'sgd': + optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, + momentum=self.lr_momentum, weight_decay=self.weight_decay) + elif self.learner.lower() == 'adagrad': + optimizer = torch.optim.Adagrad(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, weight_decay=self.weight_decay) + elif self.learner.lower() == 'rmsprop': + optimizer = torch.optim.RMSprop(self.model.parameters(), lr=self.learning_rate, + alpha=self.lr_alpha, eps=self.lr_epsilon, + momentum=self.lr_momentum, weight_decay=self.weight_decay) + elif self.learner.lower() == 'sparse_adam': + optimizer = torch.optim.SparseAdam(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, betas=self.lr_betas) + elif self.learner.lower() == 'adamw': + optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, betas=self.lr_betas, weight_decay=self.weight_decay) + else: + self._logger.warning('Received unrecognized optimizer, set default Adam optimizer') + optimizer 
= torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, weight_decay=self.weight_decay) + return optimizer + + def _build_lr_scheduler(self): + if self.lr_decay: + self._logger.info('You select `{}` lr_scheduler.'.format(self.lr_scheduler_type.lower())) + if self.lr_scheduler_type.lower() == 'multisteplr': + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( + self.optimizer, milestones=self.milestones, gamma=self.lr_decay_ratio) + elif self.lr_scheduler_type.lower() == 'steplr': + lr_scheduler = torch.optim.lr_scheduler.StepLR( + self.optimizer, step_size=self.step_size, gamma=self.lr_decay_ratio) + elif self.lr_scheduler_type.lower() == 'exponentiallr': + lr_scheduler = torch.optim.lr_scheduler.ExponentialLR( + self.optimizer, gamma=self.lr_decay_ratio) + elif self.lr_scheduler_type.lower() == 'cosineannealinglr': + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + self.optimizer, T_max=self.lr_T_max, eta_min=self.lr_eta_min) + elif self.lr_scheduler_type.lower() == 'lambdalr': + lr_scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, lr_lambda=self.lr_lambda) + elif self.lr_scheduler_type.lower() == 'reducelronplateau': + lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, mode='min', patience=self.lr_patience, + factor=self.lr_decay_ratio, threshold=self.lr_threshold) + elif self.lr_scheduler_type.lower() == 'cosinelr': + lr_scheduler = CosineLRScheduler( + self.optimizer, t_initial=self.epochs, lr_min=self.lr_eta_min, decay_rate=self.lr_decay_ratio, + warmup_t=self.lr_warmup_epoch, warmup_lr_init=self.lr_warmup_init) + else: + self._logger.warning('Received unrecognized lr_scheduler, ' + 'please check the parameter `lr_scheduler`.') + lr_scheduler = None + else: + lr_scheduler = None + return lr_scheduler + + def train(self, train_dataloader, eval_dataloader): + self._logger.info('Start training ...') + min_val_loss = float('inf') + wait = 0 + best_epoch = 0 + train_time = [] + eval_time = [] + num_batches = len(train_dataloader) + self._logger.info("num_batches:{}".format(num_batches)) + + batches_seen = num_batches * self._epoch_num + for epoch_idx in range(self._epoch_num, self.epochs): + start_time = time.time() + losses, batches_seen = self._train_epoch(train_dataloader, epoch_idx, batches_seen, self.loss_func) + t1 = time.time() + train_time.append(t1 - start_time) + train_loss = np.mean(losses) + if self.distributed: + train_loss = reduce_array(train_loss, self.world_size, self.device) + self._writer.add_scalar('training loss', train_loss, batches_seen) + self._logger.info("epoch complete!") + + self._logger.info("evaluating now!") + t2 = time.time() + val_loss = self._valid_epoch(eval_dataloader, epoch_idx, batches_seen, self.loss_func) + end_time = time.time() + eval_time.append(end_time - t2) + + epoch_time = end_time - start_time + if self.distributed: + epoch_time = reduce_array(np.array(epoch_time), self.world_size, self.device) + + if self.lr_scheduler is not None: + if self.lr_scheduler_type.lower() == 'reducelronplateau': + self.lr_scheduler.step(val_loss) + elif self.lr_scheduler_type.lower() == 'cosinelr': + self.lr_scheduler.step(epoch_idx + 1) + else: + self.lr_scheduler.step() + + if (epoch_idx % self.log_every) == 0: + log_lr = self.optimizer.param_groups[0]['lr'] + message = 'Epoch [{}/{}] ({}) train_loss: {:.4f}, val_loss: {:.4f}, lr: {:.6f}, {:.2f}s'. 
\ + format(epoch_idx, self.epochs, batches_seen, train_loss, val_loss, log_lr, epoch_time) + self._logger.info(message) + + if self.hyper_tune: + with tune.checkpoint_dir(step=epoch_idx) as checkpoint_dir: + path = os.path.join(checkpoint_dir, "checkpoint") + self.save_model(path) + tune.report(loss=val_loss) + + if val_loss < min_val_loss: + wait = 0 + if self.saved: + model_file_name = self.save_model_with_epoch(epoch_idx) + self._logger.info('Val loss decrease from {:.4f} to {:.4f}, ' + 'saving to {}'.format(min_val_loss, val_loss, model_file_name)) + min_val_loss = val_loss + best_epoch = epoch_idx + else: + wait += 1 + if wait == self.patience and self.use_early_stop: + self._logger.warning('Early stopping at epoch: %d' % epoch_idx) + break + if len(train_time) > 0: + average_train_time = sum(train_time) / len(train_time) + average_eval_time = sum(eval_time) / len(eval_time) + if self.distributed: + average_train_time = reduce_array(average_train_time, self.world_size, self.device) + average_eval_time = reduce_array(average_eval_time, self.world_size, self.device) + self._logger.info('Trained totally {} epochs, average train time is {:.3f}s, ' + 'average eval time is {:.3f}s'. + format(len(train_time), average_train_time, average_eval_time)) + if self.load_best_epoch: + self.load_model_with_epoch(best_epoch) + return min_val_loss + + def _train_epoch(self, train_dataloader, epoch_idx, batches_seen=None, loss_func=None): + self.model.train() + if loss_func is None: + if self.distributed: + loss_func = self.model.module.calculate_loss_without_predict + else: + loss_func = self.model.calculate_loss_without_predict + losses = [] + for batch in train_dataloader: + batch.to_tensor(self.device) + batch_lap_pos_enc = self.lap_mx.to(self.device) + if self.random_flip: + sign_flip = torch.rand(batch_lap_pos_enc.size(1)).to(self.device) + sign_flip[sign_flip >= 0.5] = 1.0 + sign_flip[sign_flip < 0.5] = -1.0 + batch_lap_pos_enc = batch_lap_pos_enc * sign_flip.unsqueeze(0) + y_true = batch['y'] + y_predicted = self.model(batch, batch_lap_pos_enc) + loss = loss_func(y_true, y_predicted, batches_seen=batches_seen, set_loss=self.set_loss) + self._logger.debug(loss.item()) + losses.append(loss.item()) + batches_seen += 1 + loss = loss / self.grad_accmu_steps + loss.backward() + if self.clip_grad_norm: + torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm) + if batches_seen % self.grad_accmu_steps == 0: + self.optimizer.step() + if self.lr_scheduler is not None: + if self.lr_scheduler_type.lower() == 'cosinelr': + self.lr_scheduler.step_update(num_updates=batches_seen) + self.optimizer.zero_grad() + return losses, batches_seen + + def _valid_epoch(self, eval_dataloader, epoch_idx, batches_seen=None, loss_func=None): + with torch.no_grad(): + self.model.eval() + if loss_func is None: + if self.distributed: + loss_func = self.model.module.calculate_loss_without_predict + else: + loss_func = self.model.calculate_loss_without_predict + losses = [] + for batch in eval_dataloader: + batch.to_tensor(self.device) + y_true = batch['y'] + y_predicted = self.model(batch, self.lap_mx) + loss = loss_func(y_true, y_predicted, batches_seen=batches_seen, set_loss=self.set_loss) + self._logger.debug(loss.item()) + losses.append(loss.item()) + mean_loss = np.mean(losses) + if self.distributed: + mean_loss = reduce_array(mean_loss, self.world_size, self.device) + self._writer.add_scalar('eval loss', mean_loss, batches_seen) + return mean_loss + + def evaluate(self, test_dataloader): + 
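+ # evaluate on the test set: inverse-transform predictions and ground truth back to the original scale, save both to an .npz file, then let the evaluator compute the configured metrics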
self._logger.info('Start evaluating ...') + with torch.no_grad(): + self.model.eval() + y_truths = [] + y_preds = [] + for batch in test_dataloader: + batch.to_tensor(self.device) + output = self.model.predict(batch, lap_mx=self.lap_mx) + y_true = self._scaler.inverse_transform(batch['y'][..., :self.output_dim]) + y_pred = self._scaler.inverse_transform(output[..., :self.output_dim]) + y_truths.append(y_true.cpu().numpy()) + y_preds.append(y_pred.cpu().numpy()) + y_preds = np.concatenate(y_preds, axis=0) + y_truths = np.concatenate(y_truths, axis=0) + outputs = {'prediction': y_preds, 'truth': y_truths} + filename = \ + time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time())) + '_' \ + + self.config['model'] + '_' + self.config['dataset'] + '_predictions.npz' + np.savez_compressed(os.path.join(self.evaluate_res_dir, filename), **outputs) + self.evaluator.clear() + self.evaluator.collect({'y_true': torch.tensor(y_truths), 'y_pred': torch.tensor(y_preds)}) + test_result = self.evaluator.save_result(self.evaluate_res_dir) + return test_result diff --git a/libcity/executor/scheduler.py b/libcity/executor/scheduler.py new file mode 100644 index 0000000..d1d73f7 --- /dev/null +++ b/libcity/executor/scheduler.py @@ -0,0 +1,183 @@ +import torch +import math +from logging import getLogger + + +class Scheduler: + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True): + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None + self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self): + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict): + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch): + return None + + def get_update_values(self, num_updates): + return None + + def step(self, epoch, metric=None): + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates, metric=None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self.noise_range_t is not None: + if 
isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + if apply_noise: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + lrs = [v + v * noise for v in lrs] + return lrs + + +class CosineLRScheduler(Scheduler): + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + t_mul: float = 1., + lr_min: float = 0., + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + cycle_limit=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + self._logger = getLogger() + if t_initial == 1 and t_mul == 1 and decay_rate == 1: + self._logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.t_mul = t_mul + self.lr_min = lr_min + self.decay_rate = decay_rate + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.t_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul)) + t_i = self.t_mul ** i * self.t_initial + t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.decay_rate ** i + lr_min = self.lr_min * gamma + lr_max_values = [v * gamma for v in self.base_values] + + if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit): + lrs = [ + lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + if not cycles: + cycles = self.cycle_limit + cycles = max(1, cycles) + if self.t_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul))) + diff --git a/libcity/executor/traffic_state_executor.py b/libcity/executor/traffic_state_executor.py new file mode 100644 index 0000000..66bc944 --- /dev/null +++ b/libcity/executor/traffic_state_executor.py @@ -0,0 +1,376 @@ +import os +import time +import numpy as np +import torch + +from ray import tune +from logging import 
getLogger +from torch.utils.tensorboard import SummaryWriter +from torch.nn.parallel import DistributedDataParallel as NativeDDP +from libcity.executor.abstract_executor import AbstractExecutor +from libcity.utils import get_evaluator, ensure_dir, reduce_array +from libcity.model import loss +from functools import partial + + +class TrafficStateExecutor(AbstractExecutor): + def __init__(self, config, model): + self.evaluator = get_evaluator(config) + self.config = config + self.device = self.config.get('device', torch.device('cpu')) + self.model = model.to(self.device) + self.exp_id = self.config.get('exp_id', None) + + self.cache_dir = './libcity/cache/{}/model_cache'.format(self.exp_id) + self.evaluate_res_dir = './libcity/cache/{}/evaluate_cache'.format(self.exp_id) + self.summary_writer_dir = './libcity/cache/{}'.format(self.exp_id) + ensure_dir(self.cache_dir) + ensure_dir(self.evaluate_res_dir) + ensure_dir(self.summary_writer_dir) + + self._writer = SummaryWriter(self.summary_writer_dir) + self._logger = getLogger() + self._scaler = self.model.get_data_feature().get('scaler') + self.rank = self.config.get('rank', 0) + self.distributed = self.config.get('distributed', False) + if self.distributed: + self.world_size = self.config.get('world_size', 1) + self._logger.info("Using native Torch DistributedDataParallel.") + local_rank = self.config.get('local_rank', 0) + self.model = NativeDDP(self.model, device_ids=[local_rank]) + self._logger.info(self.model) + for name, param in self.model.named_parameters(): + self._logger.info(str(name) + '\t' + str(param.shape) + '\t' + + str(param.device) + '\t' + str(param.requires_grad)) + total_num = sum([param.nelement() for param in self.model.parameters()]) + self._logger.info('Total parameter numbers: {}'.format(total_num)) + + self.epochs = self.config.get('max_epoch', 100) + self.train_loss = self.config.get('train_loss', 'none') + self.train_loss = 'none' + self.learner = self.config.get('learner', 'adam') + self.learning_rate = self.config.get('learning_rate', 0.01) + self.weight_decay = self.config.get('weight_decay', 0) + self.lr_beta1 = self.config.get('lr_beta1', 0.9) + self.lr_beta2 = self.config.get('lr_beta2', 0.999) + self.lr_betas = (self.lr_beta1, self.lr_beta2) + self.lr_alpha = self.config.get('lr_alpha', 0.99) + self.lr_epsilon = self.config.get('lr_epsilon', 1e-8) + self.lr_momentum = self.config.get('lr_momentum', 0) + self.lr_decay = self.config.get('lr_decay', False) + self.lr_scheduler_type = self.config.get('lr_scheduler', 'multisteplr') + self.lr_decay_ratio = self.config.get('lr_decay_ratio', 0.1) + self.milestones = self.config.get('steps', []) + self.step_size = self.config.get('step_size', 10) + self.lr_lambda = self.config.get('lr_lambda', lambda x: x) + self.lr_T_max = self.config.get('lr_T_max', 30) + self.lr_eta_min = self.config.get('lr_eta_min', 0) + self.lr_patience = self.config.get('lr_patience', 10) + self.lr_threshold = self.config.get('lr_threshold', 1e-4) + self.clip_grad_norm = self.config.get('clip_grad_norm', False) + self.max_grad_norm = self.config.get('max_grad_norm', 1.) 
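
[Reviewer note, not part of the patch] The CosineLRScheduler added in libcity/executor/scheduler.py follows a timm-style contract: step(epoch) recomputes the learning rate for every param group, with an optional linear warmup before the cosine cycle. Below is a minimal sketch of that contract only; the toy module, epoch count and hyper-parameter values are illustrative and not taken from any config in this PR.

    import torch
    from libcity.executor.scheduler import CosineLRScheduler

    model = torch.nn.Linear(4, 1)                      # toy module; only its parameters matter
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    scheduler = CosineLRScheduler(
        optimizer,
        t_initial=200,          # length of one cosine cycle, in epochs (t_in_epochs=True)
        lr_min=1e-6,            # floor the cosine decays towards
        warmup_t=15,            # linear warmup over the first 15 epochs
        warmup_lr_init=1e-6,    # LR at epoch 0; ramps up to the optimizer's base lr
    )

    for epoch in range(200):
        # ... run one training epoch ...
        scheduler.step(epoch)   # recomputes and writes optimizer.param_groups[i]['lr']
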
+ self.use_early_stop = self.config.get('use_early_stop', False) + self.patience = self.config.get('patience', 50) + self.log_every = self.config.get('log_every', 1) + self.saved = self.config.get('saved_model', True) + self.load_best_epoch = self.config.get('load_best_epoch', True) + self.hyper_tune = self.config.get('hyper_tune', False) + + self.output_dim = self.config.get('output_dim', 1) + self.optimizer = self._build_optimizer() + self.lr_scheduler = self._build_lr_scheduler() + self._epoch_num = self.config.get('epoch', 0) + if self._epoch_num > 0: + self.load_model_with_epoch(self._epoch_num) + self.loss_func = self._build_train_loss() + + self.initial_ckpt = self.config.get("initial_ckpt", None) + if self.initial_ckpt: + self.load_model_with_initial_ckpt(self.initial_ckpt) + self.grad_accmu_steps = config.get('grad_accmu_steps', 1) + self.optimizer.zero_grad() + + def load_model_with_initial_ckpt(self, initial_ckpt): + assert os.path.exists(initial_ckpt), 'Weights at %s not found' % initial_ckpt + model_state, optimizer_state = torch.load(initial_ckpt, map_location=torch.device('cpu')) + model_keys = self.model.state_dict() + state_dict_load = {} + unexpect_keys = [] + for k, v in model_state.items(): + if k not in model_keys.keys() or v.shape != model_keys[k].shape: + unexpect_keys.append(k) + else: + state_dict_load[k] = v + for k, v in model_keys.items(): + if k not in model_state.keys(): + unexpect_keys.append(k) + self._logger.info("unexpected keys: {}".format(unexpect_keys)) + self.model.load_state_dict(state_dict_load, strict=False) + self._logger.info("Initialize model from {}".format(initial_ckpt)) + + def save_model(self, cache_name): + ensure_dir(self.cache_dir) + self._logger.info("Saved model at " + cache_name) + torch.save((self.model.state_dict(), self.optimizer.state_dict()), cache_name) + + def load_model(self, cache_name): + self._logger.info("Loaded model at " + cache_name) + model_state, optimizer_state = torch.load(cache_name) + self.model.load_state_dict(model_state) + self.optimizer.load_state_dict(optimizer_state) + + def save_model_with_epoch(self, epoch): + ensure_dir(self.cache_dir) + config = dict() + config['model_state_dict'] = self.model.state_dict() + config['optimizer_state_dict'] = self.optimizer.state_dict() + config['epoch'] = epoch + model_path = self.cache_dir + '/' + self.config['model'] + '_' + self.config['dataset'] + '_epoch%d.tar' % epoch + torch.save(config, model_path) + self._logger.info("Saved model at {}".format(epoch)) + return model_path + + def load_model_with_epoch(self, epoch): + model_path = self.cache_dir + '/' + self.config['model'] + '_' + self.config['dataset'] + '_epoch%d.tar' % epoch + assert os.path.exists(model_path), 'Weights at epoch %d not found' % epoch + checkpoint = torch.load(model_path, map_location='cpu') + self.model.load_state_dict(checkpoint['model_state_dict']) + self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + self._logger.info("Loaded model at {}".format(epoch)) + + def _build_optimizer(self): + self._logger.info('You select `{}` optimizer.'.format(self.learner.lower())) + if self.learner.lower() == 'adam': + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, betas=self.lr_betas, weight_decay=self.weight_decay) + elif self.learner.lower() == 'sgd': + optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, + momentum=self.lr_momentum, weight_decay=self.weight_decay) + elif self.learner.lower() == 'adagrad': + optimizer 
= torch.optim.Adagrad(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, weight_decay=self.weight_decay) + elif self.learner.lower() == 'rmsprop': + optimizer = torch.optim.RMSprop(self.model.parameters(), lr=self.learning_rate, + alpha=self.lr_alpha, eps=self.lr_epsilon, + momentum=self.lr_momentum, weight_decay=self.weight_decay) + elif self.learner.lower() == 'sparse_adam': + optimizer = torch.optim.SparseAdam(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, betas=self.lr_betas) + else: + self._logger.warning('Received unrecognized optimizer, set default Adam optimizer') + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, + eps=self.lr_epsilon, weight_decay=self.weight_decay) + return optimizer + + def _build_lr_scheduler(self): + if self.lr_decay: + self._logger.info('You select `{}` lr_scheduler.'.format(self.lr_scheduler_type.lower())) + if self.lr_scheduler_type.lower() == 'multisteplr': + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( + self.optimizer, milestones=self.milestones, gamma=self.lr_decay_ratio) + elif self.lr_scheduler_type.lower() == 'steplr': + lr_scheduler = torch.optim.lr_scheduler.StepLR( + self.optimizer, step_size=self.step_size, gamma=self.lr_decay_ratio) + elif self.lr_scheduler_type.lower() == 'exponentiallr': + lr_scheduler = torch.optim.lr_scheduler.ExponentialLR( + self.optimizer, gamma=self.lr_decay_ratio) + elif self.lr_scheduler_type.lower() == 'cosineannealinglr': + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + self.optimizer, T_max=self.lr_T_max, eta_min=self.lr_eta_min) + elif self.lr_scheduler_type.lower() == 'lambdalr': + lr_scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, lr_lambda=self.lr_lambda) + elif self.lr_scheduler_type.lower() == 'reducelronplateau': + lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, mode='min', patience=self.lr_patience, + factor=self.lr_decay_ratio, threshold=self.lr_threshold) + else: + self._logger.warning('Received unrecognized lr_scheduler, ' + 'please check the parameter `lr_scheduler`.') + lr_scheduler = None + else: + lr_scheduler = None + return lr_scheduler + + def _build_train_loss(self): + if self.train_loss.lower() == 'none': + self._logger.warning('Received none train loss func and will use the loss func defined in the model.') + return None + if self.train_loss.lower() not in ['mae', 'mse', 'rmse', 'mape', 'logcosh', 'huber', 'quantile', 'masked_mae', + 'masked_mse', 'masked_rmse', 'masked_mape', 'r2', 'evar']: + self._logger.warning('Received unrecognized train loss function, set default mae loss func.') + else: + self._logger.info('You select `{}` as train loss function.'.format(self.train_loss.lower())) + + def func(batch): + y_true = batch['y'] + y_predicted = self.model.predict(batch) + y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim]) + y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim]) + if self.train_loss.lower() == 'mae': + lf = loss.masked_mae_torch + elif self.train_loss.lower() == 'mse': + lf = loss.masked_mse_torch + elif self.train_loss.lower() == 'rmse': + lf = loss.masked_rmse_torch + elif self.train_loss.lower() == 'mape': + lf = loss.masked_mape_torch + elif self.train_loss.lower() == 'logcosh': + lf = loss.log_cosh_loss + elif self.train_loss.lower() == 'huber': + lf = loss.huber_loss + elif self.train_loss.lower() == 'quantile': + lf = loss.quantile_loss + elif self.train_loss.lower() == 'masked_mae': + lf 
= partial(loss.masked_mae_torch, null_val=0) + elif self.train_loss.lower() == 'masked_mse': + lf = partial(loss.masked_mse_torch, null_val=0) + elif self.train_loss.lower() == 'masked_rmse': + lf = partial(loss.masked_rmse_torch, null_val=0) + elif self.train_loss.lower() == 'masked_mape': + lf = partial(loss.masked_mape_torch, null_val=0) + elif self.train_loss.lower() == 'r2': + lf = loss.r2_score_torch + elif self.train_loss.lower() == 'evar': + lf = loss.explained_variance_score_torch + else: + lf = loss.masked_mae_torch + return lf(y_predicted, y_true) + return func + + def evaluate(self, test_dataloader): + self._logger.info('Start evaluating ...') + with torch.no_grad(): + self.model.eval() + y_truths = [] + y_preds = [] + for batch in test_dataloader: + batch.to_tensor(self.device) + output = self.model(batch) if self.distributed else self.model.predict(batch) + y_true = self._scaler.inverse_transform(batch['y'][..., :self.output_dim]) + y_pred = self._scaler.inverse_transform(output[..., :self.output_dim]) + y_truths.append(y_true.cpu().numpy()) + y_preds.append(y_pred.cpu().numpy()) + y_preds = np.concatenate(y_preds, axis=0) + y_truths = np.concatenate(y_truths, axis=0) + outputs = {'prediction': y_preds, 'truth': y_truths} + filename = \ + time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time())) + '_' \ + + self.config['model'] + '_' + self.config['dataset'] + '_predictions.npz' + np.savez_compressed(os.path.join(self.evaluate_res_dir, filename), **outputs) + self.evaluator.clear() + self.evaluator.collect({'y_true': torch.tensor(y_truths), 'y_pred': torch.tensor(y_preds)}) + test_result = self.evaluator.save_result(self.evaluate_res_dir) + return test_result + + def train(self, train_dataloader, eval_dataloader): + self._logger.info('Start training ...') + min_val_loss = float('inf') + wait = 0 + best_epoch = 0 + train_time = [] + eval_time = [] + num_batches = len(train_dataloader) + self._logger.info("num_batches:{}".format(num_batches)) + + batches_seen = num_batches * self._epoch_num + for epoch_idx in range(self._epoch_num, self.epochs): + start_time = time.time() + losses, batches_seen = self._train_epoch(train_dataloader, epoch_idx, batches_seen, self.loss_func) + t1 = time.time() + train_time.append(t1 - start_time) + train_loss = np.mean(losses) + if self.distributed: + train_loss = reduce_array(train_loss, self.world_size, self.device) + self._writer.add_scalar('training loss', np.mean(losses), batches_seen) + self._logger.info("epoch complete!") + + self._logger.info("evaluating now!") + t2 = time.time() + val_loss = self._valid_epoch(eval_dataloader, epoch_idx, batches_seen, self.loss_func) + end_time = time.time() + eval_time.append(end_time - t2) + + epoch_time = end_time - start_time + if self.distributed: + epoch_time = reduce_array(np.array(epoch_time), self.world_size, self.device) + + if self.lr_scheduler is not None: + if self.lr_scheduler_type.lower() == 'reducelronplateau': + self.lr_scheduler.step(val_loss) + else: + self.lr_scheduler.step() + + if (epoch_idx % self.log_every) == 0: + log_lr = self.optimizer.param_groups[0]['lr'] + message = 'Epoch [{}/{}] ({}) train_loss: {:.4f}, val_loss: {:.4f}, lr: {:.6f}, {:.2f}s'.\ + format(epoch_idx, self.epochs, batches_seen, np.mean(losses), val_loss, + log_lr, (end_time - start_time)) + self._logger.info(message) + + if self.hyper_tune: + with tune.checkpoint_dir(step=epoch_idx) as checkpoint_dir: + path = os.path.join(checkpoint_dir, "checkpoint") + self.save_model(path) + tune.report(loss=val_loss) + 
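
[Reviewer note, not part of the patch] In _build_train_loss above, the masked_* variants are bound with null_val=0, so zero-valued ground-truth entries are excluded from the loss instead of being penalised. A small worked example against masked_mae_torch from libcity/model/loss.py (added later in this diff); the tensors are made up for illustration.

    import torch
    from libcity.model.loss import masked_mae_torch

    preds  = torch.tensor([2.0, 2.0, 2.0, 2.0])
    labels = torch.tensor([1.0, 3.0, 0.0, 0.0])   # the two zeros stand for missing readings

    # Plain MAE over all four entries would be (1 + 1 + 2 + 2) / 4 = 1.5.
    # With null_val=0 the zero labels are masked out and the remaining errors are
    # re-weighted by the mask mean, giving (1 + 1) / 2 = 1.0.
    print(masked_mae_torch(preds, labels, null_val=0))   # tensor(1.)
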
+ if val_loss < min_val_loss: + wait = 0 + if self.saved: + model_file_name = self.save_model_with_epoch(epoch_idx) + self._logger.info('Val loss decrease from {:.4f} to {:.4f}, ' + 'saving to {}'.format(min_val_loss, val_loss, model_file_name)) + min_val_loss = val_loss + best_epoch = epoch_idx + else: + wait += 1 + if wait == self.patience and self.use_early_stop: + self._logger.warning('Early stopping at epoch: %d' % epoch_idx) + break + if len(train_time) > 0: + average_train_time = sum(train_time) / len(train_time) + average_eval_time = sum(eval_time) / len(eval_time) + if self.distributed: + average_train_time = reduce_array(average_train_time, self.world_size, self.device) + average_eval_time = reduce_array(average_eval_time, self.world_size, self.device) + self._logger.info('Trained totally {} epochs, average train time is {:.3f}s, ' + 'average eval time is {:.3f}s'. + format(len(train_time), average_train_time, average_eval_time)) + if self.load_best_epoch: + self.load_model_with_epoch(best_epoch) + return min_val_loss + + def _train_epoch(self, train_dataloader, epoch_idx, batches_seen=None, loss_func=None): + self.model.train() + loss_func = loss_func if loss_func is not None else self.model.calculate_loss + losses = [] + for batch in train_dataloader: + batch.to_tensor(self.device) + loss = loss_func(batch) + self._logger.debug(loss.item()) + losses.append(loss.item()) + batches_seen += 1 + loss = loss / self.grad_accmu_steps + loss.backward() + if self.clip_grad_norm: + torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm) + if batches_seen % self.grad_accmu_steps == 0: + self.optimizer.step() + self.optimizer.zero_grad() + return losses, batches_seen + + def _valid_epoch(self, eval_dataloader, epoch_idx, batches_seen=None, loss_func=None): + with torch.no_grad(): + self.model.eval() + loss_func = loss_func if loss_func is not None else self.model.calculate_loss + losses = [] + for batch in eval_dataloader: + batch.to_tensor(self.device) + loss = loss_func(batch) + self._logger.debug(loss.item()) + losses.append(loss.item()) + mean_loss = np.mean(losses) + if self.distributed: + mean_loss = reduce_array(mean_loss, self.world_size, self.device) + self._writer.add_scalar('eval loss', mean_loss, batches_seen) + return mean_loss diff --git a/libcity/model/__init__.py b/libcity/model/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libcity/model/abstract_model.py b/libcity/model/abstract_model.py new file mode 100644 index 0000000..1498d29 --- /dev/null +++ b/libcity/model/abstract_model.py @@ -0,0 +1,25 @@ +import torch.nn as nn + + +class AbstractModel(nn.Module): + + def __init__(self, config, data_feature): + nn.Module.__init__(self) + + def predict(self, batch): + """ + Args: + batch (Batch): a batch of input + + Returns: + torch.tensor: predict result of this batch + """ + + def calculate_loss(self, batch): + """ + Args: + batch (Batch): a batch of input + + Returns: + torch.tensor: return training loss + """ diff --git a/libcity/model/abstract_traffic_state_model.py b/libcity/model/abstract_traffic_state_model.py new file mode 100644 index 0000000..091e1ac --- /dev/null +++ b/libcity/model/abstract_traffic_state_model.py @@ -0,0 +1,31 @@ +from libcity.model.abstract_model import AbstractModel + + +class AbstractTrafficStateModel(AbstractModel): + + def __init__(self, config, data_feature): + self.data_feature = data_feature + super().__init__(config, data_feature) + + def predict(self, batch): + """ + + Args: + batch (Batch): a batch 
of input + + Returns: + torch.tensor: predict result of this batch + """ + + def calculate_loss(self, batch): + """ + + Args: + batch (Batch): a batch of input + + Returns: + torch.tensor: return training loss + """ + + def get_data_feature(self): + return self.data_feature diff --git a/libcity/model/loss.py b/libcity/model/loss.py new file mode 100644 index 0000000..c29fe2f --- /dev/null +++ b/libcity/model/loss.py @@ -0,0 +1,177 @@ +import torch +import numpy as np +from sklearn.metrics import r2_score, explained_variance_score + + +def masked_mae_loss(y_pred, y_true): + mask = (y_true != 0).float() + mask /= mask.mean() + loss = torch.abs(y_pred - y_true) + loss = loss * mask + loss[loss != loss] = 0 + return loss.mean() + + +def masked_mae_torch(preds, labels, null_val=np.nan, mask_val=np.nan): + labels[torch.abs(labels) < 1e-4] = 0 + if np.isnan(null_val): + mask = ~torch.isnan(labels) + else: + mask = labels.ne(null_val) + if not np.isnan(mask_val): + mask &= labels.ge(mask_val) + mask = mask.float() + mask /= torch.mean(mask) + mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) + loss = torch.abs(torch.sub(preds, labels)) + loss = loss * mask + loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss) + return torch.mean(loss) + + +def log_cosh_loss(preds, labels): + loss = torch.log(torch.cosh(preds - labels)) + return torch.mean(loss) + + +def huber_loss(preds, labels, delta=1.0): + residual = torch.abs(preds - labels) + condition = torch.le(residual, delta) + small_res = 0.5 * torch.square(residual) + large_res = delta * residual - 0.5 * delta * delta + return torch.mean(torch.where(condition, small_res, large_res)) + + +def masked_huber_loss(preds, labels, delta=1.0, null_val=np.nan): + labels[torch.abs(labels) < 1e-4] = 0 + if np.isnan(null_val): + mask = ~torch.isnan(labels) + else: + mask = labels.ne(null_val) + mask = mask.float() + mask /= torch.mean(mask) + mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) + residual = torch.abs(preds - labels) + condition = torch.le(residual, delta) + small_res = 0.5 * torch.square(residual) + large_res = delta * residual - 0.5 * delta * delta + loss = torch.where(condition, small_res, large_res) + loss = loss * mask + loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss) + return torch.mean(loss) + + +def quantile_loss(preds, labels, delta=0.25): + condition = torch.ge(labels, preds) + large_res = delta * (labels - preds) + small_res = (1 - delta) * (preds - labels) + return torch.mean(torch.where(condition, large_res, small_res)) + + +def masked_mape_torch(preds, labels, null_val=np.nan, mask_val=np.nan): + labels[torch.abs(labels) < 1e-4] = 0 + if np.isnan(null_val): + mask = ~torch.isnan(labels) + else: + mask = labels.ne(null_val) + if not np.isnan(mask_val): + mask &= labels.ge(mask_val) + mask = mask.float() + mask /= torch.mean(mask) + mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) + loss = torch.abs((preds - labels) / labels) + loss = loss * mask + loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss) + return torch.mean(loss) + + +def masked_mse_torch(preds, labels, null_val=np.nan, mask_val=np.nan): + labels[torch.abs(labels) < 1e-4] = 0 + if np.isnan(null_val): + mask = ~torch.isnan(labels) + else: + mask = labels.ne(null_val) + if not np.isnan(mask_val): + mask &= labels.ge(mask_val) + mask = mask.float() + mask /= torch.mean(mask) + mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) + loss = 
torch.square(torch.sub(preds, labels)) + loss = loss * mask + loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss) + return torch.mean(loss) + + +def masked_rmse_torch(preds, labels, null_val=np.nan, mask_val=np.nan): + labels[torch.abs(labels) < 1e-4] = 0 + return torch.sqrt(masked_mse_torch(preds=preds, labels=labels, + null_val=null_val, mask_val=mask_val)) + + +def r2_score_torch(preds, labels): + preds = preds.cpu().flatten() + labels = labels.cpu().flatten() + return r2_score(labels, preds) + + +def explained_variance_score_torch(preds, labels): + preds = preds.cpu().flatten() + labels = labels.cpu().flatten() + return explained_variance_score(labels, preds) + + +def masked_rmse_np(preds, labels, null_val=np.nan): + return np.sqrt(masked_mse_np(preds=preds, labels=labels, + null_val=null_val)) + + +def masked_mse_np(preds, labels, null_val=np.nan): + with np.errstate(divide='ignore', invalid='ignore'): + if np.isnan(null_val): + mask = ~np.isnan(labels) + else: + mask = np.not_equal(labels, null_val) + mask = mask.astype('float32') + mask /= np.mean(mask) + rmse = np.square(np.subtract(preds, labels)).astype('float32') + rmse = np.nan_to_num(rmse * mask) + return np.mean(rmse) + + +def masked_mae_np(preds, labels, null_val=np.nan): + with np.errstate(divide='ignore', invalid='ignore'): + if np.isnan(null_val): + mask = ~np.isnan(labels) + else: + mask = np.not_equal(labels, null_val) + mask = mask.astype('float32') + mask /= np.mean(mask) + mae = np.abs(np.subtract(preds, labels)).astype('float32') + mae = np.nan_to_num(mae * mask) + return np.mean(mae) + + +def masked_mape_np(preds, labels, null_val=np.nan): + with np.errstate(divide='ignore', invalid='ignore'): + if np.isnan(null_val): + mask = ~np.isnan(labels) + else: + mask = np.not_equal(labels, null_val) + mask = mask.astype('float32') + mask /= np.mean(mask) + mape = np.abs(np.divide(np.subtract( + preds, labels).astype('float32'), labels)) + mape = np.nan_to_num(mask * mape) + return np.mean(mape) + + +def r2_score_np(preds, labels): + preds = preds.flatten() + labels = labels.flatten() + return r2_score(labels, preds) + + +def explained_variance_score_np(preds, labels): + preds = preds.flatten() + labels = labels.flatten() + return explained_variance_score(labels, preds) diff --git a/libcity/model/traffic_flow_prediction/PDFormer.py b/libcity/model/traffic_flow_prediction/PDFormer.py new file mode 100644 index 0000000..5f89b60 --- /dev/null +++ b/libcity/model/traffic_flow_prediction/PDFormer.py @@ -0,0 +1,504 @@ +import math +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init +from functools import partial +from logging import getLogger +from libcity.model import loss +from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel + + +def drop_path(x, drop_prob=0., training=False): + if drop_prob == 0. 
or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() + output = x.div(keep_prob) * random_tensor + return output + + +class TokenEmbedding(nn.Module): + def __init__(self, input_dim, embed_dim, norm_layer=None): + super().__init__() + self.token_embed = nn.Linear(input_dim, embed_dim, bias=True) + self.norm = norm_layer(embed_dim) if norm_layer is not None else nn.Identity() + + def forward(self, x): + x = self.token_embed(x) + x = self.norm(x) + return x + + +class PositionalEncoding(nn.Module): + def __init__(self, embed_dim, max_len=100): + super(PositionalEncoding, self).__init__() + pe = torch.zeros(max_len, embed_dim).float() + pe.require_grad = False + + position = torch.arange(0, max_len).float().unsqueeze(1) + div_term = (torch.arange(0, embed_dim, 2).float() * -(math.log(10000.0) / embed_dim)).exp() + + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + + pe = pe.unsqueeze(0) + self.register_buffer('pe', pe) + + def forward(self, x): + return self.pe[:, :x.size(1)].unsqueeze(2).expand_as(x).detach() + + +class LaplacianPE(nn.Module): + def __init__(self, lape_dim, embed_dim): + super().__init__() + self.embedding_lap_pos_enc = nn.Linear(lape_dim, embed_dim) + + def forward(self, lap_mx): + lap_pos_enc = self.embedding_lap_pos_enc(lap_mx).unsqueeze(0).unsqueeze(0) + return lap_pos_enc + + +class DataEmbedding(nn.Module): + def __init__( + self, feature_dim, embed_dim, lape_dim, adj_mx, drop=0., + add_time_in_day=False, add_day_in_week=False, device=torch.device('cpu'), + ): + super().__init__() + + self.add_time_in_day = add_time_in_day + self.add_day_in_week = add_day_in_week + + self.device = device + self.embed_dim = embed_dim + self.feature_dim = feature_dim + self.value_embedding = TokenEmbedding(feature_dim, embed_dim) + + self.position_encoding = PositionalEncoding(embed_dim) + if self.add_time_in_day: + self.minute_size = 1440 + self.daytime_embedding = nn.Embedding(self.minute_size, embed_dim) + if self.add_day_in_week: + weekday_size = 7 + self.weekday_embedding = nn.Embedding(weekday_size, embed_dim) + self.spatial_embedding = LaplacianPE(lape_dim, embed_dim) + self.dropout = nn.Dropout(drop) + + def forward(self, x, lap_mx): + origin_x = x + x = self.value_embedding(origin_x[:, :, :, :self.feature_dim]) + x += self.position_encoding(x) + if self.add_time_in_day: + x += self.daytime_embedding((origin_x[:, :, :, self.feature_dim] * self.minute_size).round().long()) + if self.add_day_in_week: + x += self.weekday_embedding(origin_x[:, :, :, self.feature_dim + 1: self.feature_dim + 8].argmax(dim=3)) + x += self.spatial_embedding(lap_mx) + x = self.dropout(x) + return x + + +class DropPath(nn.Module): + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Chomp2d(nn.Module): + def __init__(self, chomp_size): + super(Chomp2d, self).__init__() + self.chomp_size = chomp_size + + def forward(self, x): + return x[:, :, :x.shape[2] - self.chomp_size, :].contiguous() + + +class STSelfAttention(nn.Module): + def __init__( + self, dim, s_attn_size, t_attn_size, geo_num_heads=4, sem_num_heads=2, t_num_heads=2, qkv_bias=False, + attn_drop=0., proj_drop=0., device=torch.device('cpu'), output_dim=1, + ): + super().__init__() + assert dim % (geo_num_heads + 
sem_num_heads + t_num_heads) == 0 + self.geo_num_heads = geo_num_heads + self.sem_num_heads = sem_num_heads + self.t_num_heads = t_num_heads + self.head_dim = dim // (geo_num_heads + sem_num_heads + t_num_heads) + self.scale = self.head_dim ** -0.5 + self.device = device + self.s_attn_size = s_attn_size + self.t_attn_size = t_attn_size + self.geo_ratio = geo_num_heads / (geo_num_heads + sem_num_heads + t_num_heads) + self.sem_ratio = sem_num_heads / (geo_num_heads + sem_num_heads + t_num_heads) + self.t_ratio = 1 - self.geo_ratio - self.sem_ratio + self.output_dim = output_dim + + self.pattern_q_linears = nn.ModuleList([ + nn.Linear(dim, int(dim * self.geo_ratio)) for _ in range(output_dim) + ]) + self.pattern_k_linears = nn.ModuleList([ + nn.Linear(dim, int(dim * self.geo_ratio)) for _ in range(output_dim) + ]) + self.pattern_v_linears = nn.ModuleList([ + nn.Linear(dim, int(dim * self.geo_ratio)) for _ in range(output_dim) + ]) + + self.geo_q_conv = nn.Conv2d(dim, int(dim * self.geo_ratio), kernel_size=1, bias=qkv_bias) + self.geo_k_conv = nn.Conv2d(dim, int(dim * self.geo_ratio), kernel_size=1, bias=qkv_bias) + self.geo_v_conv = nn.Conv2d(dim, int(dim * self.geo_ratio), kernel_size=1, bias=qkv_bias) + self.geo_attn_drop = nn.Dropout(attn_drop) + + self.sem_q_conv = nn.Conv2d(dim, int(dim * self.sem_ratio), kernel_size=1, bias=qkv_bias) + self.sem_k_conv = nn.Conv2d(dim, int(dim * self.sem_ratio), kernel_size=1, bias=qkv_bias) + self.sem_v_conv = nn.Conv2d(dim, int(dim * self.sem_ratio), kernel_size=1, bias=qkv_bias) + self.sem_attn_drop = nn.Dropout(attn_drop) + + self.t_q_conv = nn.Conv2d(dim, int(dim * self.t_ratio), kernel_size=1, bias=qkv_bias) + self.t_k_conv = nn.Conv2d(dim, int(dim * self.t_ratio), kernel_size=1, bias=qkv_bias) + self.t_v_conv = nn.Conv2d(dim, int(dim * self.t_ratio), kernel_size=1, bias=qkv_bias) + self.t_attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, x_patterns, pattern_keys, geo_mask=None, sem_mask=None): + B, T, N, D = x.shape + t_q = self.t_q_conv(x.permute(0, 3, 1, 2)).permute(0, 3, 2, 1) + t_k = self.t_k_conv(x.permute(0, 3, 1, 2)).permute(0, 3, 2, 1) + t_v = self.t_v_conv(x.permute(0, 3, 1, 2)).permute(0, 3, 2, 1) + t_q = t_q.reshape(B, N, T, self.t_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + t_k = t_k.reshape(B, N, T, self.t_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + t_v = t_v.reshape(B, N, T, self.t_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + t_attn = (t_q @ t_k.transpose(-2, -1)) * self.scale + t_attn = t_attn.softmax(dim=-1) + t_attn = self.t_attn_drop(t_attn) + t_x = (t_attn @ t_v).transpose(2, 3).reshape(B, N, T, int(D * self.t_ratio)).transpose(1, 2) + + geo_q = self.geo_q_conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + geo_k = self.geo_k_conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + for i in range(self.output_dim): + pattern_q = self.pattern_q_linears[i](x_patterns[..., i]) + pattern_k = self.pattern_k_linears[i](pattern_keys[..., i]) + pattern_v = self.pattern_v_linears[i](pattern_keys[..., i]) + pattern_attn = (pattern_q @ pattern_k.transpose(-2, -1)) * self.scale + pattern_attn = pattern_attn.softmax(dim=-1) + geo_k += pattern_attn @ pattern_v + geo_v = self.geo_v_conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + geo_q = geo_q.reshape(B, T, N, self.geo_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + geo_k = geo_k.reshape(B, T, N, self.geo_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + geo_v = geo_v.reshape(B, T, N, 
self.geo_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + geo_attn = (geo_q @ geo_k.transpose(-2, -1)) * self.scale + if geo_mask is not None: + geo_attn.masked_fill_(geo_mask, float('-inf')) + geo_attn = geo_attn.softmax(dim=-1) + geo_attn = self.geo_attn_drop(geo_attn) + geo_x = (geo_attn @ geo_v).transpose(2, 3).reshape(B, T, N, int(D * self.geo_ratio)) + + sem_q = self.sem_q_conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + sem_k = self.sem_k_conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + sem_v = self.sem_v_conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + sem_q = sem_q.reshape(B, T, N, self.sem_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + sem_k = sem_k.reshape(B, T, N, self.sem_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + sem_v = sem_v.reshape(B, T, N, self.sem_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + sem_attn = (sem_q @ sem_k.transpose(-2, -1)) * self.scale + if sem_mask is not None: + sem_attn.masked_fill_(sem_mask, float('-inf')) + sem_attn = sem_attn.softmax(dim=-1) + sem_attn = self.sem_attn_drop(sem_attn) + sem_x = (sem_attn @ sem_v).transpose(2, 3).reshape(B, T, N, int(D * self.sem_ratio)) + + x = self.proj(torch.cat([t_x, geo_x, sem_x], dim=-1)) + x = self.proj_drop(x) + return x + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class TemporalSelfAttention(nn.Module): + def __init__( + self, dim, dim_out, t_attn_size, t_num_heads=6, qkv_bias=False, + attn_drop=0., proj_drop=0., device=torch.device('cpu'), + ): + super().__init__() + assert dim % t_num_heads == 0 + self.t_num_heads = t_num_heads + self.head_dim = dim // t_num_heads + self.scale = self.head_dim ** -0.5 + self.device = device + self.t_attn_size = t_attn_size + + self.t_q_conv = nn.Conv2d(dim, dim, kernel_size=1, bias=qkv_bias) + self.t_k_conv = nn.Conv2d(dim, dim, kernel_size=1, bias=qkv_bias) + self.t_v_conv = nn.Conv2d(dim, dim, kernel_size=1, bias=qkv_bias) + self.t_attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim_out) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, T, N, D = x.shape + t_q = self.t_q_conv(x.permute(0, 3, 1, 2)).permute(0, 3, 2, 1) + t_k = self.t_k_conv(x.permute(0, 3, 1, 2)).permute(0, 3, 2, 1) + t_v = self.t_v_conv(x.permute(0, 3, 1, 2)).permute(0, 3, 2, 1) + t_q = t_q.reshape(B, N, T, self.t_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + t_k = t_k.reshape(B, N, T, self.t_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + t_v = t_v.reshape(B, N, T, self.t_num_heads, self.head_dim).permute(0, 1, 3, 2, 4) + + t_attn = (t_q @ t_k.transpose(-2, -1)) * self.scale + + t_attn = t_attn.softmax(dim=-1) + t_attn = self.t_attn_drop(t_attn) + + t_x = (t_attn @ t_v).transpose(2, 3).reshape(B, N, T, D).transpose(1, 2) + + x = self.proj(t_x) + x = self.proj_drop(x) + return x + + +class STEncoderBlock(nn.Module): + + def __init__( + self, dim, s_attn_size, t_attn_size, geo_num_heads=4, sem_num_heads=2, t_num_heads=2, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, 
device=torch.device('cpu'), type_ln="pre", output_dim=1, + ): + super().__init__() + self.type_ln = type_ln + self.norm1 = norm_layer(dim) + self.st_attn = STSelfAttention( + dim, s_attn_size, t_attn_size, geo_num_heads=geo_num_heads, sem_num_heads=sem_num_heads, t_num_heads=t_num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop, device=device, output_dim=output_dim, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, x_patterns, pattern_keys, geo_mask=None, sem_mask=None): + if self.type_ln == 'pre': + x = x + self.drop_path(self.st_attn(self.norm1(x), x_patterns, pattern_keys, geo_mask=geo_mask, sem_mask=sem_mask)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + elif self.type_ln == 'post': + x = self.norm1(x + self.drop_path(self.st_attn(x, x_patterns, pattern_keys, geo_mask=geo_mask, sem_mask=sem_mask))) + x = self.norm2(x + self.drop_path(self.mlp(x))) + return x + + +class PDFormer(AbstractTrafficStateModel): + def __init__(self, config, data_feature): + super().__init__(config, data_feature) + + self._scaler = self.data_feature.get('scaler') + self.num_nodes = self.data_feature.get("num_nodes", 1) + self.feature_dim = self.data_feature.get("feature_dim", 1) + self.ext_dim = self.data_feature.get("ext_dim", 0) + self.num_batches = self.data_feature.get('num_batches', 1) + self.dtw_matrix = self.data_feature.get('dtw_matrix') + self.adj_mx = data_feature.get('adj_mx') + sd_mx = data_feature.get('sd_mx') + sh_mx = data_feature.get('sh_mx') + self._logger = getLogger() + self.dataset = config.get('dataset') + + self.embed_dim = config.get('embed_dim', 64) + self.skip_dim = config.get("skip_dim", 256) + lape_dim = config.get('lape_dim', 8) + geo_num_heads = config.get('geo_num_heads', 4) + sem_num_heads = config.get('sem_num_heads', 2) + t_num_heads = config.get('t_num_heads', 2) + mlp_ratio = config.get("mlp_ratio", 4) + qkv_bias = config.get("qkv_bias", True) + drop = config.get("drop", 0.) + attn_drop = config.get("attn_drop", 0.) 
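
[Reviewer note, not part of the patch] A quick sanity check of how STSelfAttention above splits the embedding width between the geographic, semantic and temporal branches, using the defaults read by PDFormer.__init__ below (embed_dim=64, geo/sem/t heads = 4/2/2). The arithmetic is illustrative only.

    embed_dim, geo_num_heads, sem_num_heads, t_num_heads = 64, 4, 2, 2

    total_heads = geo_num_heads + sem_num_heads + t_num_heads   # 8
    assert embed_dim % total_heads == 0                          # checked in STSelfAttention.__init__
    head_dim = embed_dim // total_heads                          # 8

    geo_width = int(embed_dim * geo_num_heads / total_heads)     # 32  (geo_ratio = 0.5)
    sem_width = int(embed_dim * sem_num_heads / total_heads)     # 16  (sem_ratio = 0.25)
    t_width   = embed_dim - geo_width - sem_width                # 16  (t_ratio = 1 - geo - sem)

    # Each branch runs multi-head attention at head_dim per head; the branch outputs are
    # concatenated back to embed_dim before the final nn.Linear(dim, dim) projection.
    print(geo_width, sem_width, t_width)                         # 32 16 16
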
+ drop_path = config.get("drop_path", 0.3) + self.s_attn_size = config.get("s_attn_size", 3) + self.t_attn_size = config.get("t_attn_size", 3) + enc_depth = config.get("enc_depth", 6) + type_ln = config.get("type_ln", "pre") + self.type_short_path = config.get("type_short_path", "hop") + + self.output_dim = config.get('output_dim', 1) + self.input_window = config.get("input_window", 12) + self.output_window = config.get('output_window', 12) + add_time_in_day = config.get("add_time_in_day", True) + add_day_in_week = config.get("add_day_in_week", True) + self.device = config.get('device', torch.device('cpu')) + self.world_size = config.get('world_size', 1) + self.huber_delta = config.get('huber_delta', 1) + self.quan_delta = config.get('quan_delta', 0.25) + self.far_mask_delta = config.get('far_mask_delta', 5) + self.dtw_delta = config.get('dtw_delta', 5) + + self.use_curriculum_learning = config.get('use_curriculum_learning', True) + self.step_size = config.get('step_size', 2500) + self.max_epoch = config.get('max_epoch', 200) + self.task_level = config.get('task_level', 0) + if self.max_epoch * self.num_batches * self.world_size < self.step_size * self.output_window: + self._logger.warning('Parameter `step_size` is too big with {} epochs and ' + 'the model cannot be trained for all time steps.'.format(self.max_epoch)) + if self.use_curriculum_learning: + self._logger.info('Use use_curriculum_learning!') + + if self.type_short_path == "dist": + distances = sd_mx[~np.isinf(sd_mx)].flatten() + std = distances.std() + sd_mx = np.exp(-np.square(sd_mx / std)) + self.far_mask = torch.zeros(self.num_nodes, self.num_nodes).to(self.device) + self.far_mask[sd_mx < self.far_mask_delta] = 1 + self.far_mask = self.far_mask.bool() + else: + sh_mx = sh_mx.T + self.geo_mask = torch.zeros(self.num_nodes, self.num_nodes).to(self.device) + self.geo_mask[sh_mx >= self.far_mask_delta] = 1 + self.geo_mask = self.geo_mask.bool() + self.sem_mask = torch.ones(self.num_nodes, self.num_nodes).to(self.device) + sem_mask = self.dtw_matrix.argsort(axis=1)[:, :self.dtw_delta] + for i in range(self.sem_mask.shape[0]): + self.sem_mask[i][sem_mask[i]] = 0 + self.sem_mask = self.sem_mask.bool() + + self.pattern_keys = torch.from_numpy(data_feature.get('pattern_keys')).float().to(self.device) + self.pattern_embeddings = nn.ModuleList([ + TokenEmbedding(self.s_attn_size, self.embed_dim) for _ in range(self.output_dim) + ]) + + self.enc_embed_layer = DataEmbedding( + self.feature_dim - self.ext_dim, self.embed_dim, lape_dim, self.adj_mx, drop=drop, + add_time_in_day=add_time_in_day, add_day_in_week=add_day_in_week, device=self.device, + ) + + enc_dpr = [x.item() for x in torch.linspace(0, drop_path, enc_depth)] + self.encoder_blocks = nn.ModuleList([ + STEncoderBlock( + dim=self.embed_dim, s_attn_size=self.s_attn_size, t_attn_size=self.t_attn_size, geo_num_heads=geo_num_heads, sem_num_heads=sem_num_heads, t_num_heads=t_num_heads, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=enc_dpr[i], act_layer=nn.GELU, + norm_layer=partial(nn.LayerNorm, eps=1e-6), device=self.device, type_ln=type_ln, output_dim=self.output_dim, + ) for i in range(enc_depth) + ]) + + self.skip_convs = nn.ModuleList([ + nn.Conv2d( + in_channels=self.embed_dim, out_channels=self.skip_dim, kernel_size=1, + ) for _ in range(enc_depth) + ]) + + self.end_conv1 = nn.Conv2d( + in_channels=self.input_window, out_channels=self.output_window, kernel_size=1, bias=True, + ) + self.end_conv2 = nn.Conv2d( + in_channels=self.skip_dim, 
out_channels=self.output_dim, kernel_size=1, bias=True, + ) + + def forward(self, batch, lap_mx=None): + x = batch['X'] + T = x.shape[1] + x_pattern_list = [] + for i in range(self.s_attn_size): + x_pattern = F.pad( + x[:, :T + i + 1 - self.s_attn_size, :, :self.output_dim], + (0, 0, 0, 0, self.s_attn_size - 1 - i, 0), + "constant", 0, + ).unsqueeze(-2) + x_pattern_list.append(x_pattern) + x_patterns = torch.cat(x_pattern_list, dim=-2) # (B, T, N, s_attn_size, output_dim) + + x_pattern_list = [] + pattern_key_list = [] + for i in range(self.output_dim): + x_pattern_list.append(self.pattern_embeddings[i](x_patterns[..., i]).unsqueeze(-1)) + pattern_key_list.append(self.pattern_embeddings[i](self.pattern_keys[..., i]).unsqueeze(-1)) + x_patterns = torch.cat(x_pattern_list, dim=-1) + pattern_keys = torch.cat(pattern_key_list, dim=-1) + + enc = self.enc_embed_layer(x, lap_mx) + skip = 0 + for i, encoder_block in enumerate(self.encoder_blocks): + enc = encoder_block(enc, x_patterns, pattern_keys, self.geo_mask, self.sem_mask) + skip += self.skip_convs[i](enc.permute(0, 3, 2, 1)) + + skip = self.end_conv1(F.relu(skip.permute(0, 3, 2, 1))) + skip = self.end_conv2(F.relu(skip.permute(0, 3, 2, 1))) + return skip.permute(0, 3, 2, 1) + + def get_loss_func(self, set_loss): + if set_loss.lower() not in ['mae', 'mse', 'rmse', 'mape', 'logcosh', 'huber', 'quantile', 'masked_mae', + 'masked_mse', 'masked_rmse', 'masked_mape', 'masked_huber', 'r2', 'evar']: + self._logger.warning('Received unrecognized train loss function, set default mae loss func.') + if set_loss.lower() == 'mae': + lf = loss.masked_mae_torch + elif set_loss.lower() == 'mse': + lf = loss.masked_mse_torch + elif set_loss.lower() == 'rmse': + lf = loss.masked_rmse_torch + elif set_loss.lower() == 'mape': + lf = loss.masked_mape_torch + elif set_loss.lower() == 'logcosh': + lf = loss.log_cosh_loss + elif set_loss.lower() == 'huber': + lf = partial(loss.huber_loss, delta=self.huber_delta) + elif set_loss.lower() == 'quantile': + lf = partial(loss.quantile_loss, delta=self.quan_delta) + elif set_loss.lower() == 'masked_mae': + lf = partial(loss.masked_mae_torch, null_val=0) + elif set_loss.lower() == 'masked_mse': + lf = partial(loss.masked_mse_torch, null_val=0) + elif set_loss.lower() == 'masked_rmse': + lf = partial(loss.masked_rmse_torch, null_val=0) + elif set_loss.lower() == 'masked_mape': + lf = partial(loss.masked_mape_torch, null_val=0) + elif set_loss.lower() == 'masked_huber': + lf = partial(loss.masked_huber_loss, delta=self.huber_delta, null_val=0) + elif set_loss.lower() == 'r2': + lf = loss.r2_score_torch + elif set_loss.lower() == 'evar': + lf = loss.explained_variance_score_torch + else: + lf = loss.masked_mae_torch + return lf + + def calculate_loss_without_predict(self, y_true, y_predicted, batches_seen=None, set_loss='masked_mae'): + lf = self.get_loss_func(set_loss=set_loss) + y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim]) + y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim]) + if self.training: + if batches_seen % self.step_size == 0 and self.task_level < self.output_window: + self.task_level += 1 + self._logger.info('Training: task_level increase from {} to {}'.format( + self.task_level - 1, self.task_level)) + self._logger.info('Current batches_seen is {}'.format(batches_seen)) + if self.use_curriculum_learning: + return lf(y_predicted[:, :self.task_level, :, :], y_true[:, :self.task_level, :, :]) + else: + return lf(y_predicted, y_true) + else: + return 
lf(y_predicted, y_true) + + def calculate_loss(self, batch, batches_seen=None, lap_mx=None): + y_true = batch['y'] + y_predicted = self.predict(batch, lap_mx) + return self.calculate_loss_without_predict(y_true, y_predicted, batches_seen) + + def predict(self, batch, lap_mx=None): + return self.forward(batch, lap_mx) diff --git a/libcity/model/traffic_flow_prediction/__init__.py b/libcity/model/traffic_flow_prediction/__init__.py new file mode 100644 index 0000000..e02b660 --- /dev/null +++ b/libcity/model/traffic_flow_prediction/__init__.py @@ -0,0 +1,5 @@ +from libcity.model.traffic_flow_prediction.PDFormer import PDFormer + +__all__ = [ + "PDFormer", +] diff --git a/libcity/pipeline/__init__.py b/libcity/pipeline/__init__.py new file mode 100644 index 0000000..1bc1003 --- /dev/null +++ b/libcity/pipeline/__init__.py @@ -0,0 +1,8 @@ +from libcity.pipeline.pipeline import run_model, hyper_parameter, finetune, objective_function + +__all__ = [ + "run_model", + "hyper_parameter", + "finetune", + "objective_function" +] diff --git a/libcity/pipeline/pipeline.py b/libcity/pipeline/pipeline.py new file mode 100644 index 0000000..f1ac7cb --- /dev/null +++ b/libcity/pipeline/pipeline.py @@ -0,0 +1,213 @@ +import os +from ray import tune +from ray.tune.suggest.hyperopt import HyperOptSearch +from ray.tune.suggest.bayesopt import BayesOptSearch +from ray.tune.suggest.basic_variant import BasicVariantGenerator +from ray.tune.schedulers import FIFOScheduler, ASHAScheduler, MedianStoppingRule +from ray.tune.suggest import ConcurrencyLimiter +import json +import torch +import random +import numpy as np + +from libcity.config import ConfigParser +from libcity.data import get_dataset +from libcity.utils import get_executor, get_model, get_logger, ensure_dir + + +def run_model(task=None, model_name=None, dataset_name=None, config_file=None, + saved_model=True, train=True, other_args=None): + config = ConfigParser(task, model_name, dataset_name, + config_file, saved_model, train, other_args) + exp_id = config.get('exp_id', None) + model_name = config.get('model') + if exp_id is None: + exp_id = int(random.SystemRandom().random() * 100000) + config['exp_id'] = exp_id + seed = config.get('seed', None) + if seed is not None: + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + logger = get_logger(config) + logger.info('Begin pipeline, task={}, model_name={}, dataset_name={}, exp_id={}'. 
+ format(str(task), str(model_name), str(dataset_name), str(exp_id))) + logger.info(config.config) + dataset = get_dataset(config) + train_data, valid_data, test_data = dataset.get_data() + data_feature = dataset.get_data_feature() + model_cache_file = './libcity/cache/{}/model_cache/{}_{}.m'.format( + exp_id, model_name, dataset_name) + model = get_model(config, data_feature) + executor = get_executor(config, model) + if train or not os.path.exists(model_cache_file): + executor.train(train_data, valid_data) + if saved_model: + executor.save_model(model_cache_file) + else: + executor.load_model(model_cache_file) + executor.evaluate(test_data) + + +def parse_search_space(space_file): + search_space = {} + if os.path.exists('./{}.json'.format(space_file)): + with open('./{}.json'.format(space_file), 'r') as f: + paras_dict = json.load(f) + for name in paras_dict: + paras_type = paras_dict[name]['type'] + if paras_type == 'uniform': + try: + search_space[name] = tune.uniform(paras_dict[name]['lower'], paras_dict[name]['upper']) + except: + raise TypeError('The space file does not meet the format requirements,\ + when parsing uniform type.') + elif paras_type == 'randn': + try: + search_space[name] = tune.randn(paras_dict[name]['mean'], paras_dict[name]['sd']) + except: + raise TypeError('The space file does not meet the format requirements,\ + when parsing randn type.') + elif paras_type == 'randint': + try: + if 'lower' not in paras_dict[name]: + search_space[name] = tune.randint(paras_dict[name]['upper']) + else: + search_space[name] = tune.randint(paras_dict[name]['lower'], paras_dict[name]['upper']) + except: + raise TypeError('The space file does not meet the format requirements,\ + when parsing randint type.') + elif paras_type == 'choice': + try: + search_space[name] = tune.choice(paras_dict[name]['list']) + except: + raise TypeError('The space file does not meet the format requirements,\ + when parsing choice type.') + elif paras_type == 'grid_search': + try: + search_space[name] = tune.grid_search(paras_dict[name]['list']) + except: + raise TypeError('The space file does not meet the format requirements,\ + when parsing grid_search type.') + else: + raise TypeError('The space file does not meet the format requirements,\ + when parsing an undefined type.') + else: + raise FileNotFoundError('The space file {}.json is not found. 
Please ensure \ + the config file is in the root dir and is a txt.'.format(space_file)) + return search_space + + +def hyper_parameter(task=None, model_name=None, dataset_name=None, config_file=None, space_file=None, + scheduler=None, search_alg=None, other_args=None, num_samples=5, max_concurrent=1, + cpu_per_trial=1, gpu_per_trial=1): + experiment_config = ConfigParser(task, model_name, dataset_name, config_file=config_file, + other_args=other_args) + logger = get_logger(experiment_config) + if space_file is None: + logger.error('the space_file should not be None when hyperparameter tune.') + exit(0) + search_sapce = parse_search_space(space_file) + dataset = get_dataset(experiment_config) + train_data, valid_data, test_data = dataset.get_data() + data_feature = dataset.get_data_feature() + + def train(config, checkpoint_dir=None, experiment_config=None, + train_data=None, valid_data=None, data_feature=None): + for key in config: + if key in experiment_config: + experiment_config[key] = config[key] + experiment_config['hyper_tune'] = True + logger = get_logger(experiment_config) + logger.info('Begin pipeline, task={}, model_name={}, dataset_name={}' + .format(str(task), str(model_name), str(dataset_name))) + logger.info('running parameters: ' + str(config)) + model = get_model(experiment_config, data_feature) + executor = get_executor(experiment_config, model) + if checkpoint_dir: + checkpoint = os.path.join(checkpoint_dir, 'checkpoint') + executor.load_model(checkpoint) + executor.train(train_data, valid_data) + + if search_alg == 'BasicSearch': + algorithm = BasicVariantGenerator() + elif search_alg == 'BayesOptSearch': + algorithm = BayesOptSearch(metric='loss', mode='min') + algorithm = ConcurrencyLimiter(algorithm, max_concurrent=max_concurrent) + elif search_alg == 'HyperOpt': + algorithm = HyperOptSearch(metric='loss', mode='min') + algorithm = ConcurrencyLimiter(algorithm, max_concurrent=max_concurrent) + else: + raise ValueError('the search_alg is illegal.') + if scheduler == 'FIFO': + tune_scheduler = FIFOScheduler() + elif scheduler == 'ASHA': + tune_scheduler = ASHAScheduler() + elif scheduler == 'MedianStoppingRule': + tune_scheduler = MedianStoppingRule() + else: + raise ValueError('the scheduler is illegal') + ensure_dir('./libcity/cache/hyper_tune') + result = tune.run(tune.with_parameters(train, experiment_config=experiment_config, train_data=train_data, + valid_data=valid_data, data_feature=data_feature), + resources_per_trial={'cpu': cpu_per_trial, 'gpu': gpu_per_trial}, config=search_sapce, + metric='loss', mode='min', scheduler=tune_scheduler, search_alg=algorithm, + local_dir='./libcity/cache/hyper_tune', num_samples=num_samples) + best_trial = result.get_best_trial("loss", "min", "last") + logger.info("Best trial config: {}".format(best_trial.config)) + logger.info("Best trial final validation loss: {}".format(best_trial.last_result["loss"])) + best_path = os.path.join(best_trial.checkpoint.value, "checkpoint") + model_state, optimizer_state = torch.load(best_path) + model_cache_file = './libcity/cache/model_cache/{}_{}.m'.format( + model_name, dataset_name) + ensure_dir('./libcity/cache/model_cache') + torch.save((model_state, optimizer_state), model_cache_file) + + +def objective_function(task=None, model_name=None, dataset_name=None, config_file=None, + saved_model=True, train=True, other_args=None, hyper_config_dict=None): + config = ConfigParser(task, model_name, dataset_name, + config_file, saved_model, train, other_args, hyper_config_dict) + dataset = 
get_dataset(config) + train_data, valid_data, test_data = dataset.get_data() + data_feature = dataset.get_data_feature() + + model = get_model(config, data_feature) + executor = get_executor(config, model) + best_valid_score = executor.train(train_data, valid_data) + test_result = executor.evaluate(test_data) + + return { + 'best_valid_score': best_valid_score, + 'test_result': test_result + } + + +def finetune(task=None, model_name=None, dataset_name=None, config_file=None, + initial_ckpt=None, saved_model=True, train=True, other_args=None): + config = ConfigParser(task, model_name, dataset_name, + config_file, saved_model, train, other_args, initial_ckpt=initial_ckpt) + exp_id = config.get('exp_id', None) + if exp_id is None: + exp_id = int(random.SystemRandom().random() * 100000) + config['exp_id'] = exp_id + logger = get_logger(config) + logger.info('Begin pipeline, task={}, model_name={}, dataset_name={}, initial_ckpt={}, exp_id={}'. + format(str(task), str(model_name), str(dataset_name), str(initial_ckpt), str(exp_id))) + logger.info(config.config) + dataset = get_dataset(config) + train_data, valid_data, test_data = dataset.get_data() + data_feature = dataset.get_data_feature() + model_cache_file = './libcity/cache/{}/model_cache/{}_{}.m'.format( + exp_id, model_name, dataset_name) + model = get_model(config, data_feature) + executor = get_executor(config, model) + if train or not os.path.exists(model_cache_file): + executor.train(train_data, valid_data) + if saved_model: + executor.save_model(model_cache_file) + else: + executor.load_model(model_cache_file) + executor.evaluate(test_data) diff --git a/libcity/utils/__init__.py b/libcity/utils/__init__.py new file mode 100644 index 0000000..2dd5152 --- /dev/null +++ b/libcity/utils/__init__.py @@ -0,0 +1,31 @@ +from libcity.utils.utils import get_executor, get_model, get_evaluator, \ + get_logger, get_local_time, ensure_dir, trans_naming_rule, preprocess_data +from libcity.utils.argument_list import general_arguments, str2bool, \ + str2float, hyper_arguments +from libcity.utils.normalization import Scaler, NoneScaler, NormalScaler, \ + StandardScaler, MinMax01Scaler, MinMax11Scaler, LogScaler +from libcity.utils.distributed import reduce_array, reduce_tensor + +__all__ = [ + "get_executor", + "get_model", + "get_evaluator", + "get_logger", + "get_local_time", + "ensure_dir", + "trans_naming_rule", + "preprocess_data", + "general_arguments", + "hyper_arguments", + "str2bool", + "str2float", + "Scaler", + "NoneScaler", + "NormalScaler", + "StandardScaler", + "MinMax01Scaler", + "MinMax11Scaler", + "LogScaler", + "reduce_array", + "reduce_tensor", +] diff --git a/libcity/utils/argument_list.py b/libcity/utils/argument_list.py new file mode 100644 index 0000000..01437c8 --- /dev/null +++ b/libcity/utils/argument_list.py @@ -0,0 +1,129 @@ +import argparse + +general_arguments = { + "gpu": "bool", + "batch_size": "int", + "train_rate": "float", + "part_train_rate": "float", + "eval_rate": "float", + "learning_rate": "float", + "max_epoch": "int", + "gpu_id": "list of int", + "seed": "int", + "dataset_class": "str", + "executor": "str", + "evaluator": "str", + + "input_window": "int", + "output_window": "int", + "scaler": "str", + "load_external": "bool", + "normal_external": "bool", + "ext_scaler": "str", + "add_time_in_day": "bool", + "add_day_in_week": "bool", + "use_trend": "bool", + "len_closeness": "int", + "len_period": "int", + "len_trend": "int", + "interval_period": "int", + "interval_trend": "int", + "data_col": "str", + "bidir": 
"bool", + "far_mask_delta": "float", + "dtw_delta": "int", + + "learner": "str", + "weight_decay": "float", + "lr_decay": "bool", + "lr_scheduler": "str", + "lr_eta_min": "float", + "lr_decay_ratio": "float", + "lr_warmup_epoch": "int", + "lr_warmup_init": "float", + "use_early_stop": "bool", + "patience": "int", + "clip_grad_norm": "bool", + "max_grad_norm": "int", + "random_flip": "bool", + "use_curriculum_learning": "bool", + "grad_accmu_steps": "int", + "set_loss": "str", + "huber_delta": "float", + "quan_delta": "float", + + "step_size": "int", + "embed_dim": "int", + "skip_dim": "int", + "lape_dim": "int", + "geo_num_heads": "int", + "sem_num_heads": "int", + "t_num_heads": "int", + "mlp_ratio": "int", + "qkv_bias": "bool", + "drop": "float", + "attn_drop": "float", + "drop_path": "float", + "s_attn_size": "int", + "t_attn_size": "int", + "enc_depth": "int", + "dec_depth": "int", + "type_ln": "str", + "type_short_path": "str", + "cand_key_days": "int", + "n_cluster": "int", + "cluster_max_iter": "int", + "cluster_method": "str", + + "mode": "str", + "mask_val": "int" +} + +hyper_arguments = { + "gpu": { + "type": "bool", + "default": None, + "help": "whether use gpu" + }, + "gpu_id": { + "type": "int", + "default": None, + "help": "the gpu id to use" + }, + "train_rate": { + "type": "float", + "default": None, + "help": "the train set rate" + }, + "eval_rate": { + "type": "float", + "default": None, + "help": "the validation set rate" + }, + "batch_size": { + "type": "int", + "default": None, + "help": "the batch size" + } +} + + +def str2bool(s): + if isinstance(s, bool): + return s + if s.lower() in ('yes', 'true'): + return True + elif s.lower() in ('no', 'false'): + return False + else: + raise argparse.ArgumentTypeError('bool value expected.') + + +def str2float(s): + if isinstance(s, float): + return s + try: + x = float(s) + except ValueError: + raise argparse.ArgumentTypeError('float value expected.') + return x diff --git a/libcity/utils/distributed.py b/libcity/utils/distributed.py new file mode 100644 index 0000000..1aeb7cb --- /dev/null +++ b/libcity/utils/distributed.py @@ -0,0 +1,16 @@ +import torch +from torch import distributed as dist + + +def reduce_array(tensor, n, device): + rt = torch.tensor(tensor).to(device) + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt diff --git a/libcity/utils/normalization.py b/libcity/utils/normalization.py new file mode 100644 index 0000000..36d2ca5 --- /dev/null +++ b/libcity/utils/normalization.py @@ -0,0 +1,82 @@ +import numpy as np + + +class Scaler: + + def transform(self, data): + raise NotImplementedError("Transform not implemented") + + def inverse_transform(self, data): + raise NotImplementedError("Inverse_transform not implemented") + + +class NoneScaler(Scaler): + + def transform(self, data): + return data + + def inverse_transform(self, data): + return data + + +class NormalScaler(Scaler): + + def __init__(self, maxx): + self.max = maxx + + def transform(self, data): + return data / self.max + + def inverse_transform(self, data): + return data * self.max + + +class StandardScaler(Scaler): + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def transform(self, data): + return (data - self.mean) / self.std + + def inverse_transform(self, data): + return (data * self.std) + self.mean + + +class MinMax01Scaler(Scaler): + + def __init__(self, minn, maxx): + self.min = 
minn + self.max = maxx + + def transform(self, data): + return (data - self.min) / (self.max - self.min) + + def inverse_transform(self, data): + return data * (self.max - self.min) + self.min + + +class MinMax11Scaler(Scaler): + + def __init__(self, minn, maxx): + self.min = minn + self.max = maxx + + def transform(self, data): + return ((data - self.min) / (self.max - self.min)) * 2. - 1. + + def inverse_transform(self, data): + return ((data + 1.) / 2.) * (self.max - self.min) + self.min + + +class LogScaler(Scaler): + + def __init__(self, eps=0.999): + self.eps = eps + + def transform(self, data): + return np.log(data + self.eps) + + def inverse_transform(self, data): + return np.exp(data) - self.eps diff --git a/libcity/utils/utils.py b/libcity/utils/utils.py new file mode 100644 index 0000000..441bfb1 --- /dev/null +++ b/libcity/utils/utils.py @@ -0,0 +1,125 @@ +import importlib +import logging +import datetime +import os +import sys +import numpy as np + + +def get_executor(config, model): + try: + return getattr(importlib.import_module('libcity.executor'), + config['executor'])(config, model) + except AttributeError: + raise AttributeError('executor is not found') + + +def get_model(config, data_feature): + if config['task'] == 'traffic_state_pred': + try: + return getattr(importlib.import_module('libcity.model.traffic_flow_prediction'), + config['model'])(config, data_feature) + except AttributeError: + raise AttributeError('model is not found') + else: + raise AttributeError('task is not found') + + +def get_evaluator(config): + try: + return getattr(importlib.import_module('libcity.evaluator'), + config['evaluator'])(config) + except AttributeError: + raise AttributeError('evaluator is not found') + + +def get_logger(config, name=None): + log_dir = './libcity/log' + if not os.path.exists(log_dir): + os.makedirs(log_dir) + log_filename = '{}-{}-{}-{}.log'.format(config['exp_id'], + config['model'], config['dataset'], get_local_time()) + logfilepath = os.path.join(log_dir, log_filename) + + logger = logging.getLogger(name) + + log_level = config.get('log_level', 'INFO') + + if log_level.lower() == 'info': + level = logging.INFO + elif log_level.lower() == 'debug': + level = logging.DEBUG + elif log_level.lower() == 'error': + level = logging.ERROR + elif log_level.lower() == 'warning': + level = logging.WARNING + elif log_level.lower() == 'critical': + level = logging.CRITICAL + else: + level = logging.INFO + + logger.setLevel(level) + + formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + file_handler = logging.FileHandler(logfilepath) + file_handler.setFormatter(formatter) + + console_formatter = logging.Formatter( + '%(asctime)s - %(levelname)s - %(message)s') + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setFormatter(console_formatter) + + logger.addHandler(file_handler) + logger.addHandler(console_handler) + + logger.info('Log directory: %s', log_dir) + return logger + + +def get_local_time(): + cur = datetime.datetime.now() + cur = cur.strftime('%b-%d-%Y_%H-%M-%S') + return cur + + +def ensure_dir(dir_path): + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + +def trans_naming_rule(origin, origin_rule, target_rule): + target = '' + if origin_rule == 'upper_camel_case' and target_rule == 'under_score_rule': + for i, c in enumerate(origin): + if i == 0: + target = c.lower() + else: + target += '_' + c.lower() if c.isupper() else c + return target + else: + raise NotImplementedError( + 'trans naming rule only support 
from upper_camel_case to \ + under_score_rule') + + +def preprocess_data(data, config): + train_rate = config.get('train_rate', 0.7) + eval_rate = config.get('eval_rate', 0.1) + + input_window = config.get('input_window', 12) + output_window = config.get('output_window', 3) + + x, y = [], [] + for i in range(len(data) - input_window - output_window): + a = data[i: i + input_window + output_window] + x.append(a[0: input_window]) + y.append(a[input_window: input_window + output_window]) + x = np.array(x) + y = np.array(y) + + train_size = int(x.shape[0] * (train_rate + eval_rate)) + trainx = x[:train_size] + trainy = y[:train_size] + testx = x[train_size:x.shape[0]] + testy = y[train_size:x.shape[0]] + return trainx, trainy, testx, testy diff --git a/raw_data/readme.md b/raw_data/readme.md new file mode 100644 index 0000000..65b5f69 --- /dev/null +++ b/raw_data/readme.md @@ -0,0 +1,3 @@ +You can get all 6 datasets from the [LibCity](https://github.com/LibCity/Bigscity-LibCity) repository. + +The datasets can be downloaded from [BaiduDisk with code 1231](https://pan.baidu.com/s/1qEfcXBO-QwZfiT0G3IYMpQ) or [Google Drive](https://drive.google.com/drive/folders/1g5v2Gq1tkOq8XO0HDCZ9nOTtRpB6-gPe?usp=sharing). All datasets used in LibCity need to be processed into the [atomic files](https://bigscity-libcity-docs.readthedocs.io/en/latest/user_guide/data/atomic_files.html) format. diff --git a/readme.md b/readme.md new file mode 100644 index 0000000..b88a6a6 --- /dev/null +++ b/readme.md @@ -0,0 +1,65 @@ +# [AAAI2023] PDFormer: Propagation Delay-aware Dynamic Long-range Transformer for Traffic Flow Prediction + +This is a PyTorch implementation of the Propagation Delay-aware Dynamic Long-range Transformer (**PDFormer**) for traffic flow prediction, as described in our paper: [Jiawei Jiang](https://github.com/aptx1231)\*, [Chengkai Han](https://github.com/NickHan-cs)\*, Wayne Xin Zhao, Xiaohan Jiang, Jingyuan Wang, **[Propagation Delay-aware Dynamic Long-range Transformer for Traffic Flow Prediction]()**, AAAI 2023. + +> \* Equal Contributions. + +![framework](./framework.png) + +## Requirements + +Our code is based on Python version 3.9.7 and PyTorch version 1.10.1. Please make sure you have installed Python and PyTorch correctly. Then you can install all the dependencies with pip using the following command: + +```shell +pip install -r requirements.txt +``` + +## Data + +You can get all 6 datasets from the [LibCity](https://github.com/LibCity/Bigscity-LibCity) repository. + +The datasets can be downloaded from [BaiduDisk with code 1231](https://pan.baidu.com/s/1qEfcXBO-QwZfiT0G3IYMpQ) or [Google Drive](https://drive.google.com/drive/folders/1g5v2Gq1tkOq8XO0HDCZ9nOTtRpB6-gPe?usp=sharing). All datasets used in LibCity need to be processed into the [atomic files](https://bigscity-libcity-docs.readthedocs.io/en/latest/user_guide/data/atomic_files.html) format. + +Note that our model needs to calculate a **DTW matrix** and a **traffic pattern set** for each dataset, which is time-consuming. Therefore, we provide the DTW matrices and traffic pattern sets for all datasets in `./libcity/cache/dataset_cache/`. + +## Train & Test + +You can train and test **PDFormer** on the six datasets with the following commands.
+ +```shell +python run_model.py --task traffic_state_pred --model PDFormer --dataset PeMS04 --config_file PeMS04 +python run_model.py --task traffic_state_pred --model PDFormer --dataset PeMS08 --config_file PeMS08 +python run_model.py --task traffic_state_pred --model PDFormer --dataset PeMS07 --config_file PeMS07 +python run_model.py --task traffic_state_pred --model PDFormer --dataset NYCTaxi --config_file NYCTaxi +python run_model.py --task traffic_state_pred --model PDFormer --dataset CHIBike --config_file CHIBike +python run_model.py --task traffic_state_pred --model PDFormer --dataset T-Drive --config_file T-Drive +``` + +If you have already trained a model as above and only want to test it, you can run the following command (taking PeMS08 as an example and assuming the experiment ID from training is $ID): + +```shell +python run_model.py --task traffic_state_pred --model PDFormer --dataset PeMS08 --config_file PeMS08 --train false --exp_id $ID +``` + +## Contributors + +[![img](https://avatars.githubusercontent.com/u/59010369?v=4)](https://github.com/NickHan-cs) [![img](https://avatars.githubusercontent.com/u/35984903?v=4)](https://github.com/aptx1231) + +## Reference Code + +The code references several open-source repositories, including [LibCity](https://github.com/LibCity/Bigscity-LibCity), to whose authors we express our thanks. + +## Cite + +If you find the paper useful, please cite it as follows: + +``` +@inproceedings{pdformer, + title={PDFormer: Propagation Delay-aware Dynamic Long-range Transformer for Traffic Flow Prediction}, + author={Jiawei Jiang and Chengkai Han and Wayne Xin Zhao and Jingyuan Wang}, + booktitle = {{AAAI}}, + publisher = {{AAAI} Press}, + year = {2023} +} +``` + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..5cb81bc --- /dev/null +++ b/requirements.txt @@ -0,0 +1,20 @@ +numpy==1.21.2 +torch==1.10.1 +scipy==1.7.3 +pandas==1.1.5 +tensorboard==2.4.1 +flake8==3.3.0 +pep8-naming==0.4.1 +scikit-learn==0.24.0 +pytest==3.2.1 +ray==1.4.1 +tabulate==0.8.9 +hyperopt==0.2.5 +protobuf==3.20.0 +tqdm==4.60.0 +fastdtw==0.3.4 +networkx==2.5.1 +gensim==4.0.1 +nltk==3.2.4 +statsmodels==0.13.1 +tslearn==0.5.2 diff --git a/run_model.py b/run_model.py new file mode 100644 index 0000000..f684f6e --- /dev/null +++ b/run_model.py @@ -0,0 +1,54 @@ +import os +import argparse + +from libcity.pipeline import run_model +from libcity.utils import general_arguments, str2bool, str2float + + +def add_other_args(parser): + for arg in general_arguments: + if general_arguments[arg] == 'int': + parser.add_argument('--{}'.format(arg), type=int, default=None) + elif general_arguments[arg] == 'bool': + parser.add_argument('--{}'.format(arg), + type=str2bool, default=None) + elif general_arguments[arg] == 'str': + parser.add_argument('--{}'.format(arg), + type=str, default=None) + elif general_arguments[arg] == 'float': + parser.add_argument('--{}'.format(arg), + type=str2float, default=None) + elif general_arguments[arg] == 'list of int': + parser.add_argument('--{}'.format(arg), nargs='+', + type=int, default=None) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--task', type=str, + default='traffic_state_pred', help='the name of task') + parser.add_argument('--model', type=str, + default='GRU', help='the name of model') + parser.add_argument('--dataset', type=str, + default='METR_LA', help='the name of dataset') + parser.add_argument('--config_file', type=str, + default=None, help='the file name of config file') +
parser.add_argument('--saved_model', type=str2bool, + default=True, help='whether save the trained model') + parser.add_argument('--train', type=str2bool, default=True, + help='whether re-train model if the model is \ + trained before') + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument('--exp_id', type=str, + default=None, help='id of experiment') + add_other_args(parser) + args = parser.parse_args() + dict_args = vars(args) + other_args = {key: val for key, val in dict_args.items() if key not in [ + 'task', 'model', 'dataset', 'config_file', 'saved_model', 'train'] and + val is not None} + if args.gpu_id is not None: + os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, args.gpu_id)) + run_model(task=args.task, model_name=args.model, dataset_name=args.dataset, + config_file=args.config_file, saved_model=args.saved_model, + train=args.train, other_args=other_args)
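As a quick sanity check of the sliding-window split implemented by `preprocess_data` in `libcity/utils/utils.py`, here is a minimal sketch; the toy array, config values, and printed shapes are illustrative assumptions, not taken from the repository or its datasets:

```python
import numpy as np

from libcity.utils.utils import preprocess_data

# Hypothetical toy series: 100 time steps, 3 nodes, 1 feature.
data = np.arange(300, dtype=np.float32).reshape(100, 3, 1)
config = {"train_rate": 0.7, "eval_rate": 0.1,
          "input_window": 12, "output_window": 3}

# Each sample pairs 12 consecutive input steps with the following 3 target
# steps; roughly the first train_rate + eval_rate (80%) of the windows form
# the training split and the remainder the test split.
train_x, train_y, test_x, test_y = preprocess_data(data, config)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
```

The same windowed arrays are what the scalers in `libcity/utils/normalization.py` (e.g. `StandardScaler`) operate on: statistics are fitted on the training split, inputs are passed through `transform`, and predictions are mapped back with `inverse_transform` before computing metrics.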