diff --git a/.gitignore b/.gitignore
index 104809f88..dc57879be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,16 @@ __pycache__/
*.py[cod]
*$py.class
+test_results/test_iTransformer_custom_MS_ft15_sl1_ll3_pl512_dm8_nh8_el8_dl1024_df1_fctimeF_ebTrue_dttest_projection_0
+
+results/test_iTransformer_custom_MS_ft15_sl1_ll3_pl512_dm8_nh8_el8_dl1024_df1_fctimeF_ebTrue_dttest_projection_0
+result_long_term_forecast.txt
# C extensions
*.so
-
+input/test/data.csv
+input/test/scaler.pkl
+input/train/data.csv
+input/train/scaler.pkl
*/.DS_Store
# Distribution / packaging
@@ -128,4 +135,4 @@ venv.bak/
dmypy.json
# Pyre type checker
-.pyre/
\ No newline at end of file
+.pyre/
diff --git a/LICENSE b/LICENSE
index 6f6856e74..f4ad474ad 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,7 @@
MIT License
Copyright (c) 2022 THUML @ Tsinghua University
+Copyright (c) 2024 cloner174 @ Hamed Hajipour
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/checkpoints/DONOTREMOVE b/checkpoints/DONOTREMOVE
new file mode 100644
index 000000000..e69de29bb
diff --git a/data_provider/data_factory.py b/data_provider/data_factory.py
index 51a5fc87f..b55e7c4f5 100644
--- a/data_provider/data_factory.py
+++ b/data_provider/data_factory.py
@@ -16,39 +16,58 @@
def data_provider(args, flag):
Data = data_dict[args.data]
timeenc = 0 if args.embed != 'timeF' else 1
-
- if flag == 'test':
- shuffle_flag = False
- drop_last = True
- batch_size = 1 # bsz=1 for evaluation
- freq = args.freq
- elif flag == 'pred':
+    if flag == 'pred':
shuffle_flag = False
drop_last = False
batch_size = 1
freq = args.freq
Data = Dataset_Pred
+ data_set = Data(
+ root_path=args.pred_root_path,
+ data_path=args.pred_data_path,
+ flag=flag,
+ size=[args.seq_len, args.label_len, args.pred_len],
+ features=args.features,
+ target=args.target,
+ timeenc=timeenc,
+ freq=freq,
+ kind_of_scaler=args.kind_of_scaler if hasattr(args, 'kind_of_scaler') else 'standard',
+ name_of_col_with_date = args.name_of_col_with_date if hasattr(args, 'name_of_col_with_date') else 'date',
+ scale = args.scale if hasattr(args, 'scale') else True,
+ max_use_of_row = args.max_use_of_row if hasattr(args, 'max_use_of_row') else 'No Lim',
+ )
+ print(flag, len(data_set))
else:
- shuffle_flag = True
- drop_last = True
- batch_size = args.batch_size # bsz for train and valid
- freq = args.freq
-
- data_set = Data(
- root_path=args.root_path,
- data_path=args.data_path,
- flag=flag,
- size=[args.seq_len, args.label_len, args.pred_len],
- features=args.features,
- target=args.target,
- timeenc=timeenc,
- freq=freq,
- )
- print(flag, len(data_set))
+ if flag == 'test':
+ shuffle_flag = False
+ drop_last = True
+ batch_size = 1 # bsz=1 for evaluation
+ freq = args.freq
+ else:
+ shuffle_flag = True
+ drop_last = True
+ batch_size = args.batch_size # bsz for train and valid
+ freq = args.freq
+ data_set = Data(
+ root_path=args.root_path,
+ data_path=args.data_path,
+ flag=flag,
+ size=[args.seq_len, args.label_len, args.pred_len],
+ features=args.features,
+ target=args.target,
+ timeenc=timeenc,
+ freq=freq,
+ test_size = args.test_size if hasattr(args, 'test_size') else 0.2,
+ kind_of_scaler= args.kind_of_scaler if hasattr(args, 'kind_of_scaler') else 'standard',
+ name_of_col_with_date = args.name_of_col_with_date if hasattr(args, 'name_of_col_with_date') else 'date',
+ scale = args.scale if hasattr(args, 'scale') else True,
+ )
+ print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
+
return data_set, data_loader
diff --git a/data_provider/data_loader.py b/data_provider/data_loader.py
index c86471da7..0885d6400 100644
--- a/data_provider/data_loader.py
+++ b/data_provider/data_loader.py
@@ -1,11 +1,13 @@
import os
import numpy as np
import pandas as pd
+import joblib
import torch
from torch.utils.data import Dataset, DataLoader
-from sklearn.preprocessing import StandardScaler
+from sklearn.preprocessing import StandardScaler, MinMaxScaler
from utils.timefeatures import time_features
import warnings
+import time
warnings.filterwarnings('ignore')
@@ -189,15 +191,25 @@ def inverse_transform(self, data):
class Dataset_Custom(Dataset):
- def __init__(self, root_path, flag='train', size=None,
- features='S', data_path='ETTh1.csv',
- target='OT', scale=True, timeenc=0, freq='h'):
+ def __init__(self,
+ root_path,
+ flag='train',
+ size=None,
+ features='MS',
+ data_path='data.csv',
+ target='Close',
+ scale=True,
+ timeenc=0,
+ freq='b',
+ test_size = 0.2,
+ kind_of_scaler = None,
+ name_of_col_with_date = None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
- self.seq_len = 24 * 4 * 4
- self.label_len = 24 * 4
- self.pred_len = 24 * 4
+            self.seq_len = 1 * 5 * 3  # three business weeks (markets trade only 5 days a week)
+            self.label_len = 1 * 1  # one-day label window
+            self.pred_len = 1 * 1  # predict a single day ahead
else:
self.seq_len = size[0]
self.label_len = size[1]
@@ -212,25 +224,40 @@ def __init__(self, root_path, flag='train', size=None,
self.scale = scale
self.timeenc = timeenc
self.freq = freq
-
+ self.test_size = test_size if test_size is not None else 0.2
+        self.train_size = 0.90 - self.test_size
+ self.kind_of_scaler = kind_of_scaler if kind_of_scaler is not None else 'Standard'
+ self.name_of_col_with_date = name_of_col_with_date if name_of_col_with_date is not None else 'date'
self.root_path = root_path
self.data_path = data_path
+
+ self.scaler_path = os.path.join('./input', 'Scalers')
+ os.makedirs(self.scaler_path, exist_ok=True)
+
self.__read_data__()
def __read_data__(self):
- self.scaler = StandardScaler()
- df_raw = pd.read_csv(os.path.join(self.root_path,
- self.data_path))
-
+
+ if self.root_path == 'None':
+ df_raw = pd.read_csv(self.data_path)
+ else:
+ df_raw = pd.read_csv(os.path.join(self.root_path,
+ self.data_path))
+
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
+
cols = list(df_raw.columns)
cols.remove(self.target)
- cols.remove('date')
- df_raw = df_raw[['date'] + cols + [self.target]]
- num_train = int(len(df_raw) * 0.7)
- num_test = int(len(df_raw) * 0.2)
+ cols.remove(self.name_of_col_with_date)
+ df_raw = df_raw[[self.name_of_col_with_date] + cols + [self.target]]
+ cols.insert(0, 'date')
+ cols.append(self.target)
+ df_raw = df_raw.set_axis(cols, axis=1)
+
+ num_train = int(len(df_raw) * self.train_size)
+ num_test = int(len(df_raw) * self.test_size)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
@@ -242,14 +269,52 @@ def __read_data__(self):
df_data = df_raw[cols_data]
elif self.features == 'S':
df_data = df_raw[[self.target]]
-
+
+        file_path = os.path.join(self.scaler_path, 'scaler.pkl')
+ if os.path.exists(file_path):
+ base, ext = os.path.splitext(file_path)
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
+ file_path = f"{base}_{timestamp}{ext}"
+ self.scaler_path = file_path
+
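+        # For features 'S'/'MS' every column gets its own scaler (MinMax or Standard, per
+        # kind_of_scaler), fitted on the training slice only; the target column's scaler is
+        # saved with joblib so predictions can be inverse-transformed later.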
if self.scale:
- train_data = df_data[border1s[0]:border2s[0]]
- self.scaler.fit(train_data.values)
- data = self.scaler.transform(df_data.values)
+ if self.features == 'S' or self.features == 'MS':
+ col_scaled = []
+ for col in df_data.columns:
+ col_data = df_data[[col]].values
+ if self.kind_of_scaler == 'MinMax':
+ if col == self.target:
+ self.scaler = MinMaxScaler()
+ else:
+ scaler = MinMaxScaler()
+ else:
+ if col == self.target:
+ self.scaler = StandardScaler()
+ else:
+ scaler = StandardScaler()
+ if col == self.target:
+ self.scaler.fit(col_data[border1s[0]:border2s[0]])
+ joblib.dump(self.scaler, self.scaler_path)
+ col_temp = self.scaler.transform(col_data)
+ else:
+ scaler.fit(col_data[border1s[0]:border2s[0]])
+ col_temp = scaler.transform(col_data)
+ col_scaled.append(col_temp)
+ if len(col_scaled) == 1:
+ data = col_scaled[0]
+ else:
+ data = np.concatenate(col_scaled, axis = 1)
+ else:
+ if self.kind_of_scaler == 'MinMax':
+ self.scaler = MinMaxScaler()
+ else:
+ self.scaler = StandardScaler()
+ train_data = df_data[border1s[0]:border2s[0]]
+ self.scaler.fit(train_data.values)
+ data = self.scaler.transform(df_data.values)
else:
data = df_data.values
-
+
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
if self.timeenc == 0:
@@ -257,7 +322,7 @@ def __read_data__(self):
df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
- data_stamp = df_stamp.drop(['date'], 1).values
+ data_stamp = df_stamp.drop(['date'], axis = 1).values
elif self.timeenc == 1:
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1, 0)
@@ -288,8 +353,8 @@ def inverse_transform(self, data):
class Dataset_PEMS(Dataset):
def __init__(self, root_path, flag='train', size=None,
- features='S', data_path='ETTh1.csv',
- target='OT', scale=True, timeenc=0, freq='h'):
+ features='S', data_path='ETTh1.csv',
+ target='OT', scale=True, timeenc=0, freq='h'):
# size [seq_len, label_len, pred_len]
# info
self.seq_len = size[0]
@@ -431,21 +496,26 @@ def inverse_transform(self, data):
class Dataset_Pred(Dataset):
def __init__(self, root_path, flag='pred', size=None,
- features='S', data_path='ETTh1.csv',
- target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):
+ features='MS', data_path='data-pred.csv',
+ target='Close', scale=True, inverse=False, timeenc=0, freq='b', cols=None,
+ max_use_of_row = 'No Lim',#It also can be 'All Except a Week' or 'All Except 3 Days'
+ kind_of_scaler = None,
+ name_of_col_with_date = None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
- self.seq_len = 24 * 4 * 4
- self.label_len = 24 * 4
- self.pred_len = 24 * 4
+ self.seq_len = 1 * 5 * 6
+ self.label_len = 1 * 1
+ self.pred_len = 1 * 3
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['pred']
-
+
+ self.kind_of_scaler = kind_of_scaler if kind_of_scaler is not None else 'Standard'
+ self.name_of_col_with_date = name_of_col_with_date if name_of_col_with_date is not None else 'date'
self.features = features
self.target = target
self.scale = scale
@@ -455,12 +525,22 @@ def __init__(self, root_path, flag='pred', size=None,
self.cols = cols
self.root_path = root_path
self.data_path = data_path
+ max_use_of_row = max_use_of_row if max_use_of_row is not None else 'No Lim'
+ self.max_use_of_row = 7 if max_use_of_row.lower() == 'all except a week' else 3 if max_use_of_row.lower() == 'all except 3 days' else 0
+
+ self.scaler_path = os.path.join('./input', 'Scalers')
+ os.makedirs(self.scaler_path, exist_ok=True)
+
self.__read_data__()
-
+
+
def __read_data__(self):
self.scaler = StandardScaler()
- df_raw = pd.read_csv(os.path.join(self.root_path,
- self.data_path))
+ if self.root_path == 'None':
+ df_raw = pd.read_csv(self.data_path)
+ else:
+ df_raw = pd.read_csv(os.path.join(self.root_path,
+ self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
@@ -470,27 +550,70 @@ def __read_data__(self):
else:
cols = list(df_raw.columns)
cols.remove(self.target)
- cols.remove('date')
- df_raw = df_raw[['date'] + cols + [self.target]]
- border1 = len(df_raw) - self.seq_len
- border2 = len(df_raw)
-
+ cols.remove(self.name_of_col_with_date)
+ df_raw = df_raw[[self.name_of_col_with_date] + cols + [self.target]]
+ cols.insert(0, 'date')
+ cols.append(self.target)
+ df_raw = df_raw.set_axis(cols, axis=1)
+
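+        # Hold back the last max_use_of_row rows (0, 3 or 7) and use the seq_len rows
+        # immediately before them as the encoder input window.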
+ border1 = len(df_raw) - self.max_use_of_row - self.seq_len
+ border2 = len(df_raw) - self.max_use_of_row
+
if self.features == 'M' or self.features == 'MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == 'S':
df_data = df_raw[[self.target]]
-
+
+
+        file_path = os.path.join(self.scaler_path, 'scaler.pkl')
+ if os.path.exists(file_path):
+ base, ext = os.path.splitext(file_path)
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
+ file_path = f"{base}_{timestamp}{ext}"
+ self.scaler_path = file_path
if self.scale:
- self.scaler.fit(df_data.values)
- data = self.scaler.transform(df_data.values)
+ if self.features == 'S' or self.features == 'MS':
+ col_scaled = []
+ for col in df_data.columns:
+ col_data = df_data[[col]].values
+ if self.kind_of_scaler == 'MinMax':
+ if col == self.target:
+ self.scaler = MinMaxScaler()
+ else:
+ scaler = MinMaxScaler()
+ else:
+ if col == self.target:
+ self.scaler = StandardScaler()
+ else:
+ scaler = StandardScaler()
+ if col == self.target:
+ self.scaler.fit(col_data)
+ joblib.dump(self.scaler, self.scaler_path)
+ col_temp = self.scaler.transform(col_data)
+ else:
+ scaler.fit(col_data)
+ col_temp = scaler.transform(col_data)
+ col_scaled.append(col_temp)
+ if len(col_scaled) == 1:
+ data = col_scaled[0]
+ else:
+ data = np.concatenate(col_scaled, axis = 1)
+ else:
+ if self.kind_of_scaler == 'MinMax':
+ self.scaler = MinMaxScaler()
+ else:
+ self.scaler = StandardScaler()
+
+ self.scaler.fit(df_data.values)
+ data = self.scaler.transform(df_data.values)
else:
data = df_data.values
-
+
tmp_stamp = df_raw[['date']][border1:border2]
tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)
pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)
-
+
df_stamp = pd.DataFrame(columns=['date'])
df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])
if self.timeenc == 0:
@@ -500,24 +623,24 @@ def __read_data__(self):
df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)
df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)
- data_stamp = df_stamp.drop(['date'], 1).values
+ data_stamp = df_stamp.drop(['date'], axis=1).values
elif self.timeenc == 1:
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1, 0)
-
+
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
-
+
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
-
+
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = self.data_x[r_begin:r_begin + self.label_len]
@@ -525,11 +648,11 @@ def __getitem__(self, index):
seq_y = self.data_y[r_begin:r_begin + self.label_len]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
-
+
return seq_x, seq_y, seq_x_mark, seq_y_mark
-
+
def __len__(self):
return len(self.data_x) - self.seq_len + 1
-
+
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
diff --git a/experiments/after_train.py b/experiments/after_train.py
new file mode 100644
index 000000000..baa4cbc9b
--- /dev/null
+++ b/experiments/after_train.py
@@ -0,0 +1,202 @@
+import os
+import time
+import torch
+import tempfile
+import numpy as np
+import pandas as pd
+from datetime import timedelta
+from .exp_long_term_forecasting import Exp_Long_Term_Forecast
+from .exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial
+from .pre_train import SaveArgs, load_args
+
+
+def predict(args, model,
+ predict_root = None, predict_data = None,
+ days_to_predict = 1, retrain = False, new_data = None):#model= setting or actual model
+ """
+    Use the model to predict future days.
+    Arguments:
+        args: object | str. The model's setup; either a DotDict-style object or the path to its saved file (args.json).
+        model: str | object. Either the setting / folder name that contains 'checkpoint.pth', or the actual experiment object.
+        days_to_predict: int. How many days should be predicted.
+        predict_root: the folder that contains the prediction data. If None, the current pred root path in args is used.
+        predict_data: the name of the prediction data inside the pred folder. If None, the current name in args is used.
+        
+        retrain: bool, optional. If True and new_data is not None, the setting and args are changed so the current model is retrained on the new data.
+        new_data: str. The new data file name inside the root path from args. If None while retrain is True, the current root path and data name in args are used to retrain the model.
+        Raises an error if no data is available.
+ """
+
+ if isinstance(args, str):
+ try:
+ arg = load_args(args)
+ except Exception as e:
+ raise AssertionError(f"Fail to read args.pkl reason -> {e}")
+ else:
+ try:
+ args_path = SaveArgs(args=args, path='', temporary=True)
+ args_path = args_path.path
+ arg = load_args(args_path)
+ os.unlink(args_path)
+ except Exception as e:
+ raise AssertionError(f"Fail to read args.pkl reason -> {e}")
+
+
+ if retrain and new_data is not None:
+ arg.data_path = new_data
+
+ if predict_data is not None:
+ if predict_root is not None:
+ arg.pred_root_path = predict_root
+ arg.pred_data_path = predict_data
+
+ if isinstance(model, Exp_Long_Term_Forecast) or isinstance(model, Exp_Long_Term_Forecast_Partial):
+ model.args = arg
+ exp = model
+ elif isinstance(model, str):
+ if arg.exp_name == 'partial_train':
+ Exp = Exp_Long_Term_Forecast_Partial
+ else:
+ Exp = Exp_Long_Term_Forecast
+ exp = Exp(arg)
+ try:
+ path = os.path.join(arg.checkpoints, model)
+ path = path + '/' + 'checkpoint.pth'
+ exp.model.load_state_dict(torch.load(path))
+ except Exception as e:
+ try:
+ exp.model.load_state_dict(torch.load(model))
+ except:
+ raise AssertionError(f" There was an Error loading your model with the provded path.Assumed path is {model} and Error was: {e}")
+ else:
+ raise TypeError(" The Model Object can be of type str(model checkpoint.pth path) or the actual model from experiments kind of models from this repo.")
+
+ if retrain:
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
+ for ii in range(arg.itr):
+ setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}_{}'.format(
+ arg.model_id,
+ arg.model,
+ arg.data,
+ arg.features,
+ arg.seq_len,
+ arg.label_len,
+ arg.pred_len,
+ arg.d_model,
+ arg.n_heads,
+ arg.e_layers,
+ arg.d_layers,
+ arg.d_ff,
+ arg.factor,
+ arg.embed,
+ arg.distil,
+ arg.des,
+ arg.class_strategy, ii, timestamp)
+ print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
+ exp.train(setting)
+
+ try:
+ df_temp = pd.read_csv(os.path.join(arg.pred_root_path, arg.pred_data_path))
+ except:
+        print('Please pass the path to your prediction data via the arguments predict_root and predict_data.')
+        print('predict_root is the folder that contains your csv file and predict_data is the name of the csv file, including the .csv extension.')
+ return 0
+ end_at_first = df_temp.shape[0] - 1
+ with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+ temp_path = f"{temp_file.name}.csv"
+ df_temp.to_csv(temp_path, index= False)
+ temp_file.seek(0)
+ del df_temp
+
+ folder_path = 'results/Prediction Results/'
+ os.makedirs(folder_path, exist_ok=True)
+ file_path = folder_path + 'prediction.csv'
+
+ if os.path.exists(file_path):
+ base, ext = os.path.splitext(file_path)
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
+ file_path = f"{base}_{timestamp}{ext}"
+
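+    # Autoregressive loop: each predicted day is appended to a temporary csv and fed back
+    # in as input when forecasting the following day.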
+ for jj in range(days_to_predict):
+ if jj == 0:
+ pass
+ else:
+ arg.pred_root_path = 'None'
+ arg.pred_data_path = temp_path
+ exp.args = arg
+ pred_data, pred_loader = exp._get_data(flag='pred')
+ preds = []
+ exp.model.eval()
+ with torch.no_grad():
+ for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):
+ batch_x = batch_x.float().to(exp.device)
+ batch_y = batch_y.float().to(exp.device)
+ batch_x_mark = batch_x_mark.float().to(exp.device)
+ batch_y_mark = batch_y_mark.float().to(exp.device)
+ dec_inp = torch.zeros_like(batch_y[:, -exp.args.pred_len:, :]).float()
+ dec_inp = torch.cat([batch_y[:, :exp.args.label_len, :], dec_inp], dim=1).float().to(exp.device)
+ if exp.args.use_amp:
+ with torch.cuda.amp.autocast():
+ if exp.args.output_attention:
+ outputs = exp.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
+ else:
+ outputs = exp.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
+ else:
+ if exp.args.output_attention:
+ outputs = exp.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
+ else:
+ outputs = exp.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
+ outputs = outputs.detach().cpu().numpy()
+ if pred_data.scale:
+ shape = outputs.shape
+ outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)
+ preds.append(outputs)
+ preds = np.array(preds)
+ preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
+ #preds = [round(any_) for any_ in preds.reshape(-1).tolist()]
+ preds = list(preds[0,0,:])
+ data = pd.read_csv(temp_path)
+ cols = list(data.columns)
+ date_name = arg.name_of_col_with_date if hasattr(arg, 'name_of_col_with_date') else 'date'
+ target = arg.target
+ data[date_name] = pd.to_datetime(data[date_name])
+ last_day = data.loc[data.shape[0]-1,date_name]
+ next_day = last_day + timedelta(days=1)
+ date_index = cols.index(date_name)
+ cols.pop(date_index)
+ temp = {}
+ for i in range(len(cols)):
+ col = cols[i]
+ if col == target :
+ if arg.features == 'MS' or arg.features == 'S' :
+ temp[col] = preds[-1]
+ else:
+ temp[col] = preds[i]
+ else:
+ if arg.features == 'S':
+ temp[col] = data.loc[end_at_first, col]
+ else:
+ temp[col] = preds[i]
+ temp = pd.DataFrame(temp, index=[data.shape[0]], dtype=int)
+ temp.insert(loc = date_index, column=date_name, value=next_day)
+ data = pd.concat([data, temp])
+ if days_to_predict > 1:
+ data.to_csv(temp_path, index = False)
+ #if use_predict_on_prediction and retrain:
+ # if arg.data == 'custom':
+ # arg.root_path = 'None'
+ # arg.data_path = temp_path
+ # exp.args = arg
+ # exp.train(setting)
+ # else:
+ # print("sorry can not be done")
+
+
+ if arg.features == 'S' or arg.features == 'MS':
+ data = pd.concat( [data.loc[end_at_first:,date_name], data.loc[end_at_first:,target]],axis=1)
+ else:
+ data = data.loc[end_at_first:,:]
+ data.to_csv(file_path, index = False)
+ os.unlink(temp_path)
+ print(f'''The Results of Prediction for The Next {days_to_predict} Days Are Now Stored in
+ {file_path}''')
+ return True
diff --git a/experiments/exp_long_term_forecasting.py b/experiments/exp_long_term_forecasting.py
index 1df9760a9..bb41d5a22 100644
--- a/experiments/exp_long_term_forecasting.py
+++ b/experiments/exp_long_term_forecasting.py
@@ -2,6 +2,7 @@
from experiments.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, visual
from utils.metrics import metric
+from utils.criter import WeightedMeanAbsolutePercentageError, SymmetricMeanAbsolutePercentageError, RMSELoss,QuantileLoss, HuberLoss, PinballLoss
import torch
import torch.nn as nn
from torch import optim
@@ -9,48 +10,102 @@
import time
import warnings
import numpy as np
+from .pre_train import SaveArgs
warnings.filterwarnings('ignore')
-
+print("This is The enhanced version of Orginal code, Written in 2024")
+time.sleep(1)
class Exp_Long_Term_Forecast(Exp_Basic):
def __init__(self, args):
super(Exp_Long_Term_Forecast, self).__init__(args)
-
+ self.train_losses = []
+ self.test_losses = []
+ self.vali_losses = []
+ self.trues_during_vali = []
+ self.preds_during_vali = []
+ if args.is_training != 0:
+ try:
+ SaveArgs(args=args, path='input')
+ except:
+ print("Fail To Save The Args. Continue ..")
+ time.sleep(1)
+
+
def _build_model(self):
model = self.model_dict[self.args.model].Model(self.args).float()
-
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
-
+
def _get_data(self, flag):
data_set, data_loader = data_provider(self.args, flag)
return data_set, data_loader
-
+
def _select_optimizer(self):
- model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
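+        # Pick the optimizer named by args.kind_of_optim; anything unrecognized falls back to Adam.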
+ if self.args.kind_of_optim == 'AdamW':
+ model_optim = optim.AdamW(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'SparseAdam':
+ model_optim = optim.SparseAdam(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'SGD':
+ model_optim = optim.SGD(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'RMSprop':
+ model_optim = optim.RMSprop(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'RAdam':
+ model_optim = optim.RAdam(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'NAdam':
+ model_optim = optim.NAdam(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'LBFGS':
+ model_optim = optim.LBFGS(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'Adamax':
+ model_optim = optim.Adamax(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'ASGD':
+ model_optim = optim.ASGD(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'Adadelta':
+ model_optim = optim.Adadelta(self.model.parameters(), lr=self.args.learning_rate)
+ elif self.args.kind_of_optim == 'Adagrad':
+ model_optim = optim.Adagrad(self.model.parameters(), lr=self.args.learning_rate)
+ else:
+ model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
+
return model_optim
def _select_criterion(self):
- criterion = nn.MSELoss()
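+        # Map args.criter (case-insensitive) to one of the losses in utils.criter; the default is MSE.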
+ if self.args.criter.lower() == 'wmape':
+ criterion = WeightedMeanAbsolutePercentageError()
+ elif self.args.criter.lower() == 'smape':
+ criterion = SymmetricMeanAbsolutePercentageError()
+ elif self.args.criter.lower() == 'mae':
+ criterion = nn.L1Loss()
+ elif self.args.criter.lower() == 'rmse':
+ criterion = RMSELoss()
+ elif self.args.criter.lower() == 'quantileloss':
+ criterion = QuantileLoss()
+ elif self.args.criter.lower() == 'huberloss':
+ criterion = HuberLoss()
+ elif self.args.criter.lower() == 'pinballloss':
+ criterion = PinballLoss()
+ else:
+ criterion = nn.MSELoss() # Default to Mean Squared Error
+
return criterion
def vali(self, vali_data, vali_loader, criterion):
total_loss = []
+ trues_during_vali = []
+ preds_during_vali = []
self.model.eval()
with torch.no_grad():
for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
-
+
if 'PEMS' in self.args.data or 'Solar' in self.args.data:
batch_x_mark = None
batch_y_mark = None
else:
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
-
# decoder input
dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
@@ -69,48 +124,85 @@ def vali(self, vali_data, vali_loader, criterion):
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
-
+
pred = outputs.detach().cpu()
true = batch_y.detach().cpu()
-
+
loss = criterion(pred, true)
-
+ trues_during_vali.append(batch_y.detach().cpu().numpy())
+ preds_during_vali.append(outputs.detach().cpu().numpy())
+
total_loss.append(loss)
+
total_loss = np.average(total_loss)
self.model.train()
+ try:
+ if len(self.trues_during_vali) == 0:
+ trues_during_vali = np.array(trues_during_vali)
+ preds_during_vali = np.array(preds_during_vali)
+ self.trues_during_vali = trues_during_vali.reshape(-1, trues_during_vali.shape[-2], trues_during_vali.shape[-1])
+ self.preds_during_vali = preds_during_vali.reshape(-1, preds_during_vali.shape[-2], preds_during_vali.shape[-1])
+ else:
+ shape_self_true = self.trues_during_vali.shape
+ shape_self_pred = self.preds_during_vali.shape
+
+ trues_during_vali = np.array(trues_during_vali)
+ preds_during_vali = np.array(preds_during_vali)
+ trues_during_vali = trues_during_vali.reshape(-1, trues_during_vali.shape[-2], trues_during_vali.shape[-1])
+ preds_during_vali = preds_during_vali.reshape(-1, preds_during_vali.shape[-2], preds_during_vali.shape[-1])
+ shape_funv_true = trues_during_vali.shape
+ shape_funv_pred = preds_during_vali.shape
+
+ self.trues_during_vali = self.trues_during_vali.flatten().tolist()
+ self.preds_during_vali = self.preds_during_vali.flatten().tolist()
+ trues_during_vali = trues_during_vali.flatten().tolist()
+ preds_during_vali = preds_during_vali.flatten().tolist()
+ trues_during_vali = self.trues_during_vali + trues_during_vali
+ preds_during_vali = self.preds_during_vali + preds_during_vali
+
+ trues_during_vali = np.array(trues_during_vali)
+ preds_during_vali = np.array(preds_during_vali)
+ self.trues_during_vali = trues_during_vali.reshape(shape_funv_true[0]+shape_self_true[0], shape_self_true[1], shape_self_true[2])
+ self.preds_during_vali = preds_during_vali.reshape(shape_self_pred[0]+shape_funv_pred[0], shape_self_pred[1],shape_self_pred[2])
+ except:
+ pass
return total_loss
-
+
+
def train(self, setting):
train_data, train_loader = self._get_data(flag='train')
vali_data, vali_loader = self._get_data(flag='val')
test_data, test_loader = self._get_data(flag='test')
-
+
+ trues_during_training = []
+ preds_during_training = []
+
path = os.path.join(self.args.checkpoints, setting)
if not os.path.exists(path):
os.makedirs(path)
-
+
time_now = time.time()
-
+
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
-
+
model_optim = self._select_optimizer()
criterion = self._select_criterion()
-
+
if self.args.use_amp:
scaler = torch.cuda.amp.GradScaler()
-
+
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
-
+
self.model.train()
epoch_time = time.time()
for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
batch_x = batch_x.float().to(self.device)
-
+
batch_y = batch_y.float().to(self.device)
if 'PEMS' in self.args.data or 'Solar' in self.args.data:
batch_x_mark = None
@@ -118,11 +210,11 @@ def train(self, setting):
else:
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
-
+
# decoder input
dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
-
+
# encoder - decoder
if self.args.use_amp:
with torch.cuda.amp.autocast():
@@ -130,7 +222,7 @@ def train(self, setting):
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
-
+
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
@@ -141,13 +233,15 @@ def train(self, setting):
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
-
+
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
loss = criterion(outputs, batch_y)
+ preds_during_training.append(outputs.detach().cpu().numpy())
+ trues_during_training.append(batch_y.detach().cpu().numpy())
train_loss.append(loss.item())
-
+
if (i + 1) % 100 == 0:
print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
speed = (time.time() - time_now) / iter_count
@@ -155,7 +249,7 @@ def train(self, setting):
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
iter_count = 0
time_now = time.time()
-
+
if self.args.use_amp:
scaler.scale(loss).backward()
scaler.step(model_optim)
@@ -163,28 +257,80 @@ def train(self, setting):
else:
loss.backward()
model_optim.step()
-
+
print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
train_loss = np.average(train_loss)
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
-
+ self.train_losses.append(train_loss)
+ self.test_losses.append(test_loss)
+ self.vali_losses.append(vali_loss)
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
if early_stopping.early_stop:
print("Early stopping")
break
-
+
adjust_learning_rate(model_optim, epoch + 1, self.args)
-
+
# get_cka(self.args, setting, self.model, train_loader, self.device, epoch)
-
+
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
-
+
+ preds_during_training = np.array(preds_during_training)
+ trues_during_training = np.array(trues_during_training)
+ print('\n')
+ print('train shape:', preds_during_training.shape, trues_during_training.shape)
+ preds_during_training = preds_during_training.reshape(-1, preds_during_training.shape[-2], preds_during_training.shape[-1])
+ trues_during_training = trues_during_training.reshape(-1, trues_during_training.shape[-2], trues_during_training.shape[-1])
+ print('train shape:', preds_during_training.shape, trues_during_training.shape)
+ # result save
+ folder_path = './results/' + setting + '/'
+ if not os.path.exists(folder_path):
+ os.makedirs(folder_path)
+
+ mae, mse, rmse, mape, mspe = metric(preds_during_training, trues_during_training)
+
+ print('Train mse:{},Train mae:{}'.format(mse, mae))
+ print('Train rmse:{},Train mape:{}'.format(rmse, mape))
+ print('\n')
+ time.sleep(2)
+ f = open("result_long_term_forecast.txt", 'a')
+ f.write(setting + " \n")
+ f.write('Train mse:{},Train mae:{}'.format(mse, mae))
+ f.write('\n')
+ f.write('\n')
+ f.close()
+
+ np.save(folder_path + 'metrics_during_training.npy', np.array([mae, mse, rmse, mape, mspe]))
+ np.save(folder_path + 'preds_during_training.npy', preds_during_training)
+ np.save(folder_path + 'trues_during_training.npy', trues_during_training)
+ try:
+ preds_during_vali = np.array(self.preds_during_vali)
+ trues_during_vali = np.array(self.trues_during_vali)
+ print('Validate shape:', (preds_during_vali.shape[0]//self.args.batch_size, self.args.batch_size, preds_during_vali.shape[1],preds_during_vali.shape[2]),(trues_during_vali.shape[0]//self.args.batch_size, self.args.batch_size, trues_during_vali.shape[1],trues_during_vali.shape[2]))
+ preds_during_vali = preds_during_vali.reshape(-1, preds_during_vali.shape[-2], preds_during_vali.shape[-1])
+ trues_during_vali = trues_during_vali.reshape(-1, trues_during_vali.shape[-2], trues_during_vali.shape[-1])
+ print('Validate shape:', preds_during_vali.shape, trues_during_vali.shape)
+
+ mae, mse, rmse, mape, _ = metric(preds_during_vali, trues_during_vali)
+ print('Validate mse:{},Validate mae:{}'.format(mse, mae))
+ print('Validate rmse:{},Validate mape:{}'.format(rmse, mape))
+ print('\n')
+ time.sleep(2)
+ f = open("result_long_term_forecast.txt", 'a')
+ f.write("Validate Info:" + " \n")
+ f.write('mse:{}, mae:{}'.format(mse, mae))
+ f.write('\n')
+ f.write('\n')
+ f.close()
+ except:
+ pass
return self.model
-
+
+
def test(self, setting, test=0):
test_data, test_loader = self._get_data(flag='test')
if test:
@@ -236,10 +382,10 @@ def test(self, setting, test=0):
shape = outputs.shape
outputs = test_data.inverse_transform(outputs.squeeze(0)).reshape(shape)
batch_y = test_data.inverse_transform(batch_y.squeeze(0)).reshape(shape)
-
+
pred = outputs
true = batch_y
-
+
preds.append(pred)
trues.append(true)
if i % 20 == 0:
@@ -249,8 +395,9 @@ def test(self, setting, test=0):
input = test_data.inverse_transform(input.squeeze(0)).reshape(shape)
gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)
pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)
- visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))
-
+ if self.args.do_visual:
+ visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))
+
preds = np.array(preds)
trues = np.array(trues)
print('test shape:', preds.shape, trues.shape)
@@ -264,14 +411,15 @@ def test(self, setting, test=0):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
- print('mse:{}, mae:{}'.format(mse, mae))
+ print('Test mse:{},Test mae:{}'.format(mse, mae))
+ print('Test rmse:{},Test mape:{}'.format(rmse, mape))
f = open("result_long_term_forecast.txt", 'a')
f.write(setting + " \n")
f.write('mse:{}, mae:{}'.format(mse, mae))
f.write('\n')
f.write('\n')
f.close()
-
+
np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path + 'pred.npy', preds)
np.save(folder_path + 'true.npy', trues)
@@ -281,14 +429,14 @@ def test(self, setting, test=0):
def predict(self, setting, load=False):
pred_data, pred_loader = self._get_data(flag='pred')
-
+
if load:
path = os.path.join(self.args.checkpoints, setting)
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
-
+
preds = []
-
+
self.model.eval()
with torch.no_grad():
for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):
@@ -296,7 +444,7 @@ def predict(self, setting, load=False):
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
-
+
# decoder input
dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
@@ -313,19 +461,23 @@ def predict(self, setting, load=False):
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
outputs = outputs.detach().cpu().numpy()
+ batch_y = batch_y.detach().cpu().numpy()
+ self.batch_y = batch_y
if pred_data.scale and self.args.inverse:
shape = outputs.shape
outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)
preds.append(outputs)
-
+
preds = np.array(preds)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
-
# result save
folder_path = './results/' + setting + '/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
-
- np.save(folder_path + 'real_prediction.npy', preds)
-
- return
\ No newline at end of file
+
+ pred_save_path = folder_path + 'Preds real_prediction.npy'
+        np.save(pred_save_path, preds)
+
+ print(f'''The Results of Prediction for The Next {self.args.pred_len} Days Are Now Stored in
+ {pred_save_path}''')
+ return
diff --git a/experiments/pre_train.py b/experiments/pre_train.py
new file mode 100644
index 000000000..61df5b092
--- /dev/null
+++ b/experiments/pre_train.py
@@ -0,0 +1,92 @@
+import os
+import json
+import time
+import tempfile
+
+class DotDict:
+
+ def __init__(self, dictionary):
+ self.__dict__.update(dictionary)
+
+ def __getattr__(self, attr):
+ try:
+ return self.__dict__[attr]
+ except KeyError:
+ self.__setattr__(attr, False)
+ return self.__dict__[attr]
+
+ def __setattr__(self, key, value):
+ self.__dict__[key] = value
+
+ def __str__(self):
+ return str(self.__dict__)
+
+
+def load_args(path):
+
+ with open(path, 'r') as f:
+ loaded_args = json.load(f)
+
+ return DotDict(loaded_args)
+
+
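+# Example usage (illustrative path):
+#   args = load_args('input/args.json')
+#   args.seq_len       # attribute-style access to the saved settings
+#   args.missing_key   # unknown attributes are created as False instead of raising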
+
+class SaveArgs:
+
+ def __init__(self, args, path, temporary = False) :
+
+ if not isinstance(args, dict):
+ raise TypeError("THis CLass ONly SUpports DIctionary AS AN INput!")
+ self.args = args
+ self.path = path
+ self.temporary = temporary
+
+ self.__start__()
+
+
+ def __start__(self) :
+
+ temp = {}
+ for any_key, any_val in self.args.items() :
+
+ temp[any_key] = any_val
+
+ self.__save__(temp)
+
+
+ def __path_checker__(self):
+
+ if self.temporary:
+ with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+ file_path = temp_file.name + '.json'
+
+ self.path = file_path
+ return
+ else:
+ os.makedirs( self.path, exist_ok=True)
+ file_path = os.path.join(self.path, 'args.json')
+ if os.path.exists(file_path):
+ base, ext = os.path.splitext(file_path)
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
+ file_path = f"{base}_{timestamp}{ext}"
+
+ self.path = file_path
+
+
+ def __save__(self, arg):
+
+ try:
+ self.__path_checker__()
+ with open(self.path, 'w') as file :
+ json.dump(arg, file)
+ except:
+ print("Fail to Save Args - continue..")
+ return
+ if self.temporary:
+ pass
+ else:
+ print(f"Args Object Saved to {self.path}")
+ #print("It Can be further used by pickle.load()")
+
+ def __repr__(self) -> str:
+ return "cloner174 in github 2024"
\ No newline at end of file
diff --git a/input/pred/DONOTREMOVE b/input/pred/DONOTREMOVE
new file mode 100644
index 000000000..e69de29bb
diff --git a/input/train/DONOTREMOVE b/input/train/DONOTREMOVE
new file mode 100644
index 000000000..e69de29bb
diff --git a/results/DONOTREMOVE b/results/DONOTREMOVE
new file mode 100644
index 000000000..e69de29bb
diff --git a/run.ipynb b/run.ipynb
new file mode 100644
index 000000000..64848784f
--- /dev/null
+++ b/run.ipynb
@@ -0,0 +1,938 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install reformer_pytorch"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " date | \n",
+ " Open | \n",
+ " High | \n",
+ " Low | \n",
+ " Final | \n",
+ " Volume | \n",
+ " Close | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2001-03-25 | \n",
+ " 26000 | \n",
+ " 26000 | \n",
+ " 24885 | \n",
+ " 24885 | \n",
+ " 4520 | \n",
+ " 24885 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2001-03-26 | \n",
+ " 24885 | \n",
+ " 24885 | \n",
+ " 24885 | \n",
+ " 24885 | \n",
+ " 45 | \n",
+ " 24885 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2001-04-08 | \n",
+ " 25000 | \n",
+ " 25000 | \n",
+ " 25000 | \n",
+ " 25000 | \n",
+ " 2000 | \n",
+ " 25000 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 2001-04-10 | \n",
+ " 25003 | \n",
+ " 25003 | \n",
+ " 25003 | \n",
+ " 25003 | \n",
+ " 70 | \n",
+ " 25003 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 2001-04-15 | \n",
+ " 25010 | \n",
+ " 25010 | \n",
+ " 25010 | \n",
+ " 25010 | \n",
+ " 120 | \n",
+ " 25010 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " date Open High Low Final Volume Close\n",
+ "0 2001-03-25 26000 26000 24885 24885 4520 24885\n",
+ "1 2001-03-26 24885 24885 24885 24885 45 24885\n",
+ "2 2001-04-08 25000 25000 25000 25000 2000 25000\n",
+ "3 2001-04-10 25003 25003 25003 25003 70 25003\n",
+ "4 2001-04-15 25010 25010 25010 25010 120 25010"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#دیتا رو فرا میخونیم برای اینکه تقسیمش کنیم بین دو تا فولدر!\n",
+ "\n",
+ "# فرض میکنیم اسمش دیتاست.سی اس وی واون رو بعد از چک کردن به دو فولدر کپی میکنیم!\n",
+ "\n",
+ "import pandas as pd\n",
+ "\n",
+ "df = pd.read_csv('input/train/data.csv')\n",
+ "\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df.to_csv('input/train/data.csv', index=False)\n",
+ "df.to_csv('input/test/data.csv', index=False)\n",
+ "\n",
+ "print(\"Now We Have Two Data set, Same as each other , But one of them going to be used during pred\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# اول فرض میکنیم شما دیروز مدلتون رو تمرین دادید و آماده هست و در درایو یا جای دیگه ای سیو شده\n",
+ "# بهتره دوباره ۱۸ ساعت رو تلف نکنیم برای ترین دوباره ، شما اینطور فکر نمیکنید ؟:) ینی چی؟ ببینیم \n",
+ "# ما هنگام ترین دادن مدل یک پیغام مشاهده کردیم . این شکلی بود :\n",
+ "# Args Object Saved to input/args.json\n",
+ "# همچنین یکی دیگه :\n",
+ "# test_iTransformer_custom_MS_ft5_sl1_ll1_pl1_dm1_nh1_el1_dl1_df1_fctimeF_ebTrue_dttest_projection_0\n",
+ "# اولی ینی آرگز در واقع ست آپ مدله. و دومی چکپوینت یا دیکشنری وضعیت در واقع همه ی اون ۱۸ ساعتی که راجبش حرف زدیمو توی خودش داره\n",
+ "# ما اول اینا رو پیدا میکنیم\n",
+ "# بعد اگر بخوایم میتونیم با یه دیتای جدید هم دوباره مدل رو تمرین بدیم ولی نه از اول بلکه ادامه ی کارش. این هم در هزینه و هم انرژی و هم\n",
+ "# حفظ عقلانیت بهمون کمک میکنه\n",
+ "# It tipically save it in input folder:\n",
+ "# همونطور که بالا بصورت قلمبه و سلمبه نوشتم : معمولا توی اینپوت سیو میشه.\n",
+ "# و اگر بیشتر از یه فایل اونجا هست بهتره که اون ساعت و تاریخی\n",
+ "# رو بردارید که زیر همون تمرین ثبت شدتون نوشته شده\n",
+ "# همچنین اسم فولدری که مدل توش سیو شده یا همون چک پوینت توش سیو شده، به عنوان مدل توی تابع شناخته میشه\n",
+ "\n",
+ "\n",
+ "#تابع لود آرگز ادرس فایل آرگز دات جیسون رو ازتون میگیره و همون آرگز رو بهتون تحویل میده که موقع تمرین مدل ساختید\n",
+ "# دقت کنید که میتونید که میتونید خود فایل رو هم فراخونی کنید اگر بخواید:\n",
+ "# اینجا روشش رو بصورت کامنت آوردم\n",
+ "\n",
+ "#from experiments.pre_train import load_args\n",
+ "#\n",
+ "# path_to_saved_args = '****************'# به جای این ستاره ها اسم همون فایل رو بزارید مثلا مال من این بود: input/args_20240611_174533.json\n",
+ "#loaded_arg = load_args(path_to_saved_args)\n",
+ "#print(f\"Args object loaded from {path_to_saved_args}\")\n",
+ "#print(loaded_arg)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# اگر بالا رو ران کنید میتونید این کارا رو انجام بدین\n",
+ "\n",
+ "#loaded_arg.test_size = None\n",
+ "#loaded_arg.max_use_of_row = 'No Lim'\n",
+ "#loaded_arg.pred_data_path = 'data.csv'\n",
+ "#loaded_arg.is_training = 0 #beacuse it is predicting!:)\n",
+ "#print(loaded_arg)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# این اسم فولدریه که توش چک پوینت هست\n",
+ "# زیر همون تمرینی که میدید به مدل نوشته شده\n",
+ "\n",
+ "checkpoints_folder_name = 'test_iTransformer_custom_MS_ft5_sl1_ll1_pl1_dm1_nh1_el1_dl1_df1_fctimeF_ebTrue_dttest_projection_0'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# به جای این ستاره ها اسم همون فایل رو بزارید مثلا مال من این بود: input/args_20240611_182910.json\n",
+ "\n",
+ "path_to_saved_args = '****************'\n",
+ "path_to_saved_args = 'input/args_20240611_182910.json'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from experiments.after_train import predict"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Use CPU\n",
+ "Fail To Save The Args. Continue ..\n",
+ "pred 1\n",
+ "pred 1\n",
+ "The Results of Prediction for The Next 2 Days Are Now Stored in \n",
+ " results/Prediction Results/prediction_20240611_183102.csv\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "predict(args= path_to_saved_args, model= checkpoints_folder_name , days_to_predict = 2, retrain= False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " date | \n",
+ " Close | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2023-11-08 | \n",
+ " 14300 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2023-11-09 | \n",
+ " 14346 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2023-11-10 | \n",
+ " 14293 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " date Close\n",
+ "0 2023-11-08 14300\n",
+ "1 2023-11-09 14346\n",
+ "2 2023-11-10 14293"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "pd.read_csv('results/Prediction Results/prediction_20240611_183102.csv')#این همین آدرسیه که این بالا نوشته"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# OR OR ##"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# اگر اینا رو ندارید و میخواید ترین بدید مدل رو و بعدش پیش بینی کنید بفرمایید"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# OR IF YOU HAVE NOT YOUR ARGS ! HERE LETS TRAIN FROM SCRATCH ! #"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define the arg object\n",
+ "from utils.tools import dotdict\n",
+ "\n",
+ "arg = dotdict() # $$ این همون آرگز معروفه که ز ازش افتاده بنده خدا\n",
+ "\n",
+ "arg.root_path = 'input/train'\n",
+ "arg.data_path = 'data.csv'\n",
+ "arg.pred_root_path = 'input/pred'\n",
+ "arg.pred_data_path = 'data.csv'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Args in experiment:\n",
+ "{'root_path': 'input/train', 'data_path': 'data.csv', 'pred_root_path': 'input/pred', 'pred_data_path': 'data.csv', 'scale': True, 'test_size': 0.2, 'kind_of_scaler': 'MinMax', 'name_of_col_with_date': 'date', 'kind_of_optim': 'default', 'criter': 'default', 'do_visual': False, 'max_use_of_row': 'No Lim', 'is_training': 1, 'model_id': 'test', 'model': 'iTransformer', 'data': 'custom', 'features': 'MS', 'target': 'Close', 'freq': 'b', 'checkpoints': './checkpoints/', 'seq_len': 5, 'label_len': 1, 'pred_len': 1, 'enc_in': 6, 'dec_in': 6, 'c_out': 1, 'd_model': 1, 'n_heads': 1, 'e_layers': 1, 'd_layers': 1, 'd_ff': 1, 'moving_avg': 25, 'factor': 1, 'distil': True, 'dropout': 0.1, 'embed': 'timeF', 'activation': 'ReLU', 'num_workers': 1, 'itr': 1, 'train_epochs': 2, 'batch_size': 16, 'patience': 2, 'learning_rate': 0.9, 'des': 'test', 'loss': 'MSE', 'lradj': 'type1', 'use_amp': False, 'use_gpu': False, 'gpu': 0, 'use_multi_gpu': False, 'devices': '0,1,2,3', 'exp_name': 'MTSF', 'channel_independence': False, 'inverse': False, 'class_strategy': 'projection', 'efficient_training': False, 'use_norm': True, 'partial_start_index': 0}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import torch\n",
+ "from experiments.exp_long_term_forecasting import Exp_Long_Term_Forecast\n",
+ "from experiments.exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial\n",
+ "import random\n",
+ "import numpy as np\n",
+ "\n",
+ "# Setting the seed\n",
+ "fix_seed = 2023\n",
+ "random.seed(fix_seed)\n",
+ "torch.manual_seed(fix_seed)\n",
+ "np.random.seed(fix_seed)\n",
+ "\n",
+ "\n",
+ "# NEW OPTIONS : #\n",
+ "arg.scale = True\n",
+ "arg.test_size = 0.2\n",
+ "arg.kind_of_scaler = 'MinMax'\n",
+ "arg.name_of_col_with_date = 'date'\n",
+ "arg.kind_of_optim = 'default'\n",
+ "arg.criter = 'default'\n",
+ "arg.do_visual = False\n",
+ "arg.max_use_of_row = 'No Lim'#It also can be 'All Except a Week' or 'All Except 3 Days'\n",
+ "# # #\n",
+ "\n",
+ "arg.is_training = 1\n",
+ "arg.model_id = 'test'\n",
+ "arg.model = 'iTransformer'\n",
+ "arg.data = 'custom'\n",
+ "arg.features = 'MS'\n",
+ "arg.target = 'Close'\n",
+ "arg.freq = 'b'\n",
+ "arg.checkpoints = './checkpoints/'\n",
+ "arg.seq_len = 1*5*1\n",
+ "arg.label_len = 1*1\n",
+ "arg.pred_len = 1*1\n",
+ "arg.enc_in = 6\n",
+ "arg.dec_in = 6\n",
+ "arg.c_out = 1\n",
+ "arg.d_model = 1\n",
+ "arg.n_heads = 1\n",
+ "arg.e_layers = 1\n",
+ "arg.d_layers = 1\n",
+ "arg.d_ff = 1\n",
+ "arg.moving_avg = 25\n",
+ "arg.factor = 1\n",
+ "arg.distil = True\n",
+ "arg.dropout = 0.1\n",
+ "arg.embed = 'timeF'\n",
+ "arg.activation = 'ReLU'\n",
+ "arg.num_workers = 1\n",
+ "arg.itr = 1\n",
+ "arg.train_epochs = 2\n",
+ "arg.batch_size = 16\n",
+ "arg.patience = 2\n",
+ "arg.learning_rate = 0.9\n",
+ "arg.des = 'test'\n",
+ "arg.loss = 'MSE'\n",
+ "arg.lradj = 'type1'\n",
+ "arg.use_amp = False\n",
+ "arg.use_gpu = True if torch.cuda.is_available() else False\n",
+ "arg.gpu = 0\n",
+ "arg.use_multi_gpu = False\n",
+ "arg.devices = '0,1,2,3'\n",
+ "arg.exp_name = 'MTSF'\n",
+ "arg.channel_independence = False\n",
+ "arg.inverse = False\n",
+ "arg.class_strategy = 'projection'\n",
+ "arg.efficient_training = False\n",
+ "arg.use_norm = True\n",
+ "arg.partial_start_index = 0\n",
+ "\n",
+ "print('Args in experiment:')\n",
+ "print(arg)\n",
+ "\n",
+ "if arg.exp_name == 'partial_train':\n",
+ " Exp = Exp_Long_Term_Forecast_Partial\n",
+ "else:\n",
+ " Exp = Exp_Long_Term_Forecast\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Use CPU\n",
+ "Args Object Saved to input/args_20240611_182910.json\n",
+ ">>>>>>>start training : test_iTransformer_custom_MS_ft5_sl1_ll1_pl1_dm1_nh1_el1_dl1_df1_fctimeF_ebTrue_dttest_projection_0>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
+ "train 2578\n",
+ "val 370\n",
+ "test 738\n",
+ "\titers: 100, epoch: 1 | loss: 0.0002855\n",
+ "\tspeed: 0.0182s/iter; left time: 4.0624s\n",
+ "Epoch: 1 cost time: 2.5937039852142334\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[W NNPACK.cpp:64] Could not initialize NNPACK! Reason: Unsupported hardware.\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch: 1, Steps: 161 | Train Loss: 0.0045442 Vali Loss: 0.0050075 Test Loss: 0.0024899\n",
+ "Validation loss decreased (inf --> 0.005007). Saving model ...\n",
+ "Updating learning rate to 0.9\n",
+ "\titers: 100, epoch: 2 | loss: 0.0002948\n",
+ "\tspeed: 0.0697s/iter; left time: 4.3202s\n",
+ "Epoch: 2 cost time: 2.6332802772521973\n",
+ "Epoch: 2, Steps: 161 | Train Loss: 0.0012735 Vali Loss: 0.0055475 Test Loss: 0.0031544\n",
+ "EarlyStopping counter: 1 out of 2\n",
+ "Updating learning rate to 0.45\n",
+ "\n",
+ "\n",
+ "train shape: (322, 16, 1, 1) (322, 16, 1, 1)\n",
+ "train shape: (5152, 1, 1) (5152, 1, 1)\n",
+ "Train mse:0.002908848924562335,Train mae:0.01775975525379181\n",
+ "Train rmse:0.05393374711275101,Train mape:inf\n",
+ "\n",
+ "\n",
+ "Validate shape: (138, 16, 1, 1) (138, 16, 1, 1)\n",
+ "Validate shape: (2212, 1, 1) (2212, 1, 1)\n",
+ "Validate mse:0.003639126092588629,Validate mae:0.028315382694680695\n",
+ "Validate rmse:0.06032516964409324,Validate mape:0.049521959696378764\n",
+ "\n",
+ "\n",
+ ">>>>>>>testing : test_iTransformer_custom_MS_ft5_sl1_ll1_pl1_dm1_nh1_el1_dl1_df1_fctimeF_ebTrue_dttest_projection_0<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
+ "test 738\n",
+ "test shape: (738, 1, 1, 1) (738, 1, 1, 1)\n",
+ "test shape: (738, 1, 1) (738, 1, 1)\n",
+ "Test mse:0.002489923033863306,Test mae:0.022032689303159714\n",
+ "Test rmse:0.04989912733435631,Test mape:0.04616886377334595\n"
+ ]
+ }
+ ],
+ "source": [
+ "if arg.is_training:\n",
+ " for ii in range(arg.itr):\n",
+ " # setting record of experiments\n",
+ " setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n",
+ " arg.model_id,\n",
+ " arg.model,\n",
+ " arg.data,\n",
+ " arg.features,\n",
+ " arg.seq_len,\n",
+ " arg.label_len,\n",
+ " arg.pred_len,\n",
+ " arg.d_model,\n",
+ " arg.n_heads,\n",
+ " arg.e_layers,\n",
+ " arg.d_layers,\n",
+ " arg.d_ff,\n",
+ " arg.factor,\n",
+ " arg.embed,\n",
+ " arg.distil,\n",
+ " arg.des,\n",
+ " arg.class_strategy, ii)\n",
+ " \n",
+ " exp = Exp(arg) # set experiments\n",
+ " print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))\n",
+ " exp.train(setting)\n",
+ " \n",
+ " print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n",
+ " \n",
+ " train_losses = exp.train_losses##### --->>> Use These To Plot the Loss Values\n",
+ " test_losses = exp.test_losses#### --->>> Use These To Plot the Loss Values\n",
+ " \n",
+ " exp.test(setting)\n",
+ " \n",
+ " if arg.do_predict:\n",
+ " print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n",
+ " exp.predict(setting, True)\n",
+ " \n",
+ " torch.cuda.empty_cache()\n",
+ "else:\n",
+ " ii = 0\n",
+ " setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n",
+ " arg.model_id,\n",
+ " arg.model,\n",
+ " arg.data,\n",
+ " arg.features,\n",
+ " arg.seq_len,\n",
+ " arg.label_len,\n",
+ " arg.pred_len,\n",
+ " arg.d_model,\n",
+ " arg.n_heads,\n",
+ " arg.e_layers,\n",
+ " arg.d_layers,\n",
+ " arg.d_ff,\n",
+ " arg.factor,\n",
+ " arg.embed,\n",
+ " arg.distil,\n",
+ " arg.des,\n",
+ " arg.class_strategy, ii)\n",
+ " \n",
+ " exp = Exp(arg) # set experiments\n",
+ " print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n",
+ " exp.test(setting, test=1)\n",
+ " torch.cuda.empty_cache()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# مستقیم بریم برای پیشبینی چون همه چیز در روند ترین مشخص شده\n",
+ "\n",
+ "from experiments.after_train import predict"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "pred 1\n",
+ "pred 1\n",
+ "pred 1\n",
+ "pred 1\n",
+ "pred 1\n",
+ "The Results of Prediction for The Next 5 Days Are Now Stored in \n",
+ " results/Prediction Results/prediction_20240611_175838.csv\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "predict(args= arg, model=exp, days_to_predict=5)# این آرگ که این جا نوشتم یکم بالا تر با علامت دلار چلوش کامنت گذاشتم"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "predicted_path = 'results/Prediction Results/prediction_20240611_175838.csv'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " date | \n",
+ " Close | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2023-11-08 | \n",
+ " 14300 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2023-11-09 | \n",
+ " 14346 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2023-11-10 | \n",
+ " 14293 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 2023-11-11 | \n",
+ " 14247 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 2023-11-12 | \n",
+ " 14255 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 2023-11-13 | \n",
+ " 14212 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " date Close\n",
+ "0 2023-11-08 14300\n",
+ "1 2023-11-09 14346\n",
+ "2 2023-11-10 14293\n",
+ "3 2023-11-11 14247\n",
+ "4 2023-11-12 14255\n",
+ "5 2023-11-13 14212"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "pd.read_csv(predicted_path)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "یاد تونه بالا گفتم میتونید آرگز رو خودتون لود کنید\n",
+ "\n",
+ "بدرد این میخوره مثلا\n",
+ "\n",
+ "فکر کنید شما با حالت ام اس مدل رو ترین دادید . ینی بالایی\n",
+ "\n",
+ "حالا من تقلب میکنم و به آرگ میگم تو ام خالی هستی و بعد جای ارگز در مدل رو با این عوض میکنم \n",
+ "\n",
+ "و میبینیم که گول میخوره و همه ی پیش بینی هاشو برامون رو میکنه\n",
+ "\n",
+ "این فقط در حالت بین ام اس و اس ممکنه"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "arg.features = 'M'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "exp.args = arg"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "pred 1\n",
+ "pred 1\n",
+ "pred 1\n",
+ "pred 1\n",
+ "pred 1\n",
+ "The Results of Prediction for The Next 5 Days Are Now Stored in \n",
+ " results/Prediction Results/prediction_20240611_183425.csv\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "predict(args=arg, model=exp, days_to_predict=5)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " date | \n",
+ " Open | \n",
+ " High | \n",
+ " Low | \n",
+ " Final | \n",
+ " Volume | \n",
+ " Close | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2023-11-08 | \n",
+ " 14600 | \n",
+ " 14600 | \n",
+ " 14230 | \n",
+ " 14550 | \n",
+ " 748934 | \n",
+ " 14300 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2023-11-09 | \n",
+ " 14446 | \n",
+ " 14716 | \n",
+ " 14172 | \n",
+ " 14544 | \n",
+ " 1912825 | \n",
+ " 14346 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2023-11-10 | \n",
+ " 14435 | \n",
+ " 14664 | \n",
+ " 14143 | \n",
+ " 14515 | \n",
+ " 1500316 | \n",
+ " 14293 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 2023-11-11 | \n",
+ " 14363 | \n",
+ " 14607 | \n",
+ " 14105 | \n",
+ " 14483 | \n",
+ " 1364399 | \n",
+ " 14247 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 2023-11-12 | \n",
+ " 14398 | \n",
+ " 14598 | \n",
+ " 14097 | \n",
+ " 14460 | \n",
+ " 1341792 | \n",
+ " 14255 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 2023-11-13 | \n",
+ " 14372 | \n",
+ " 14560 | \n",
+ " 14076 | \n",
+ " 14436 | \n",
+ " 1246474 | \n",
+ " 14212 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " date Open High Low Final Volume Close\n",
+ "0 2023-11-08 14600 14600 14230 14550 748934 14300\n",
+ "1 2023-11-09 14446 14716 14172 14544 1912825 14346\n",
+ "2 2023-11-10 14435 14664 14143 14515 1500316 14293\n",
+ "3 2023-11-11 14363 14607 14105 14483 1364399 14247\n",
+ "4 2023-11-12 14398 14598 14097 14460 1341792 14255\n",
+ "5 2023-11-13 14372 14560 14076 14436 1246474 14212"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "pd.read_csv('results/Prediction Results/prediction_20240611_183425.csv')"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/run.py b/run.py
index a50bbdbf6..0a9539b3f 100644
--- a/run.py
+++ b/run.py
@@ -1,166 +1,222 @@
-import argparse
+from utils.tools import dotdict
import torch
from experiments.exp_long_term_forecasting import Exp_Long_Term_Forecast
from experiments.exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial
import random
import numpy as np
-if __name__ == '__main__':
- fix_seed = 2023
- random.seed(fix_seed)
- torch.manual_seed(fix_seed)
- np.random.seed(fix_seed)
-
- parser = argparse.ArgumentParser(description='iTransformer')
-
- # basic config
- parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
- parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
- parser.add_argument('--model', type=str, required=True, default='iTransformer',
- help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')
-
- # data loader
- parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
- parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
- parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
- parser.add_argument('--features', type=str, default='M',
- help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
- parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
- parser.add_argument('--freq', type=str, default='h',
- help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
- parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
-
- # forecasting task
- parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
- parser.add_argument('--label_len', type=int, default=48, help='start token length') # no longer needed in inverted Transformers
- parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
-
- # model define
- parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
- parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
- parser.add_argument('--c_out', type=int, default=7, help='output size') # applicable on arbitrary number of variates in inverted Transformers
- parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
- parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
- parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
- parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
- parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
- parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
- parser.add_argument('--factor', type=int, default=1, help='attn factor')
- parser.add_argument('--distil', action='store_false',
- help='whether to use distilling in encoder, using this argument means not using distilling',
- default=True)
- parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
- parser.add_argument('--embed', type=str, default='timeF',
- help='time features encoding, options:[timeF, fixed, learned]')
- parser.add_argument('--activation', type=str, default='gelu', help='activation')
- parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
- parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
-
- # optimization
- parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
- parser.add_argument('--itr', type=int, default=1, help='experiments times')
- parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
- parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
- parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
- parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
- parser.add_argument('--des', type=str, default='test', help='exp description')
- parser.add_argument('--loss', type=str, default='MSE', help='loss function')
- parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
- parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
-
- # GPU
- parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
- parser.add_argument('--gpu', type=int, default=0, help='gpu')
- parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
- parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
-
- # iTransformer
- parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
- help='experiemnt name, options:[MTSF, partial_train]')
- parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
- parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
- parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
- parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
- parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
- parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)') # See Figure 8 of our paper for the detail
- parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
- parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, '
- 'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')
-
- args = parser.parse_args()
- args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
-
- if args.use_gpu and args.use_multi_gpu:
- args.devices = args.devices.replace(' ', '')
- device_ids = args.devices.split(',')
- args.device_ids = [int(id_) for id_ in device_ids]
- args.gpu = args.device_ids[0]
-
- print('Args in experiment:')
- print(args)
-
- if args.exp_name == 'partial_train': # See Figure 8 of our paper, for the detail
- Exp = Exp_Long_Term_Forecast_Partial
- else: # MTSF: multivariate time series forecasting
- Exp = Exp_Long_Term_Forecast
-
-
- if args.is_training:
- for ii in range(args.itr):
- # setting record of experiments
- setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
- args.model_id,
- args.model,
- args.data,
- args.features,
- args.seq_len,
- args.label_len,
- args.pred_len,
- args.d_model,
- args.n_heads,
- args.e_layers,
- args.d_layers,
- args.d_ff,
- args.factor,
- args.embed,
- args.distil,
- args.des,
- args.class_strategy, ii)
-
- exp = Exp(args) # set experiments
- print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
- exp.train(setting)
-
- print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
- exp.test(setting)
-
- if args.do_predict:
- print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
- exp.predict(setting, True)
-
- torch.cuda.empty_cache()
+
+
+fix_seed = 2023
+random.seed(fix_seed)
+torch.manual_seed(fix_seed)
+np.random.seed(fix_seed)
+
+
+arg = dotdict()
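+# note: dotdict (imported from utils.tools) is presumably a plain dict with attribute-style
+# access, so options left unset (e.g. arg.do_predict below) simply read back as None/falsy.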
+
+# NEW OPTIONS : #
+
+arg.scale = True
+
+arg.test_size = 0.2 # default is 0.2, which leaves 0.7 of the data for training #
+arg.kind_of_scaler = 'Standard' # default is 'Standard'. Another Option is 'MinMax' (recommended) #
+arg.name_of_col_with_date = 'date' # default is 'date'. Name of your date column in your dataset #
+
+arg.kind_of_optim = 'default' # default is 'Adam'.
+ # other options: 'AdamW', 'SparseAdam', 'SGD', 'RMSprop', 'RAdam', 'NAdam', 'LBFGS',
+ #                'Adamax', 'ASGD', 'Adadelta', 'Adagrad'
+
+arg.criter = 'default' # default is nn.MSELoss (mean squared error)
+ # other options: 'wmape', 'smape', 'mae', 'rmse', 'quantileloss', 'huberloss', 'pinballloss'
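+ # these names presumably select the loss classes added in utils/criter.py
+ # (WeightedMeanAbsolutePercentageError, SymmetricMeanAbsolutePercentageError, RMSELoss,
+ #  QuantileLoss, HuberLoss, PinballLoss)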
+
+arg.do_visual = False
+arg.max_use_of_row = 'No Lim' # this is for prediction; other options: 'All Except a Week' or 'All Except 3 Days'
+
+# NEW Accessories : #
+
+#exp.path_to_saved_args
+#exp.vali_losses
+#exp.train_losses
+#exp.test_losses
+
+#####################
+
+arg.is_training = 1 # help: status
+arg.model_id = 'test'
+
+arg.model = 'iTransformer' # help: model name. options: iTransformer, iInformer, iReformer, iFlowformer, iFlashformer
+arg.data = 'custom' # help: dataset type
+
+arg.root_path = 'input/train' # help: main directory path of the data file
+arg.data_path = 'data.csv' # help: name of data csv file
+
+arg.pred_root_path = 'input/test'
+arg.pred_data_path = 'data.csv'
+
+
+arg.features = 'MS' # help: forecasting task , options: M ->multivariate predict multivariate , or
+# S ->univariate predict univariate , or
+# MS ->multivariate predict univariate
+
+arg.target = 'Close' # help: target feature in S or MS task
+
+arg.freq = 'b' # help: freq for time features encoding. options: s ->secondly , t ->minutely , h ->hourly
+# d ->daily , w ->weekly, m ->monthly
+# b ->business days
+# also more detailed freq like 15min or 3h
+
+arg.checkpoints = './checkpoints/' # help: location to save model checkpoints
+
+arg.seq_len = 1*5*3 # help: input sequence length
+arg.label_len = 1*1 # help: start token length
+arg.pred_len = 1*3 # help: prediction sequence length
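+# with freq='b' this presumably amounts to 15 business days (three trading weeks) of history
+# used to forecast the next 3 business days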
+
+arg.enc_in = 6 # help: encoder input size
+arg.dec_in = 6 # help: decoder input size
+arg.c_out = 1 # help: output size -> applicable on arbitrary number of variates in inverted Transformers
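+# presumably the 6 numeric columns of the dataset (Open, High, Low, Final, Volume, Close) as
+# input variates, with a single output variate: the 'Close' target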
+arg.d_model = 512 # help: dimension of model
+arg.n_heads = 8 # help: num of heads
+arg.e_layers = 8 # help: num of encoder layers
+arg.d_layers = 8 # help: num of decoder layers
+arg.d_ff = 2048 # help: dimension of fcn
+arg.moving_avg = 25 # help: window size of moving average
+arg.factor = 1 # help: attn factor
+arg.distil = True # help: whether to use distilling in encoder, using this argument means not using distilling
+
+arg.dropout = 0.01
+
+arg.embed = 'learned' # help: time features encoding, options: timeF OR fixed OR learned
+arg.activation = 'ReLU' # help: Name of activation Function
+
+#arg.output_attention = None # help: whether to output attention in encoder
+#arg.do_predict = None # help: whether to predict unseen future data
+
+arg.num_workers = 10 # help: data loader num workers
+arg.itr = 1 # help: how many times to repeat the experiment
+
+arg.train_epochs = 25
+
+arg.batch_size = 16
+
+arg.patience = 10 # help: early stopping patience
+
+arg.learning_rate = 0.00005
+
+arg.des = 'test' # help: exp description
+
+arg.loss = 'MSE' # help: loss function
+
+arg.lradj = 'type1' # help: adjust learning rate
+arg.use_amp = False # help: use automatic mixed precision training
+
+arg.use_gpu = True if torch.cuda.is_available() else False # help: whether to use gpu
+arg.gpu = 0 # help: GPU
+arg.use_multi_gpu = False
+arg.devices = '0,1,2,3'
+
+arg.exp_name = 'MTSF'
+
+arg.channel_independence = False # help: whether to use channel_independence mechanism
+
+arg.inverse = False # help: inverse output data
+
+arg.class_strategy = 'projection' # help: options: projection/average/cls_token
+
+
+
+
+arg.efficient_training = False # help: whether to use efficient_training (exp_name should be partial_train) | See Figure 8
+
+arg.use_norm = True # help: use norm and denorm | type=int
+
+arg.partial_start_index = 0 # help: the start index of variates for partial training,
+# you can select [partial_start_index, min(enc_in + partial_start_index, N)]
+
+#if arg.use_gpu and arg.use_multi_gpu:
+# arg.devices = arg.devices.replace(' ', '')
+# device_ids = arg.devices.split(',')
+# arg.device_ids = [int(id_) for id_ in device_ids]
+# arg.gpu = arg.device_ids[0]
+
+
+print('Args in experiment:')
+print(arg)
+
+
+
+if input("Press Enter To Start :" ) == '' :
+ pass
+else:
+ exit()
+
+if arg.exp_name == 'partial_train': # See Figure 8 of our paper, for the detail
+ Exp = Exp_Long_Term_Forecast_Partial
+else: # MTSF: multivariate time series forecasting
+ Exp = Exp_Long_Term_Forecast
+
+if arg.is_training:
+ for ii in range(arg.itr):
+ # setting record of experiments
+ setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
+ arg.model_id,
+ arg.model,
+ arg.data,
+ arg.features,
+ arg.seq_len,
+ arg.label_len,
+ arg.pred_len,
+ arg.d_model,
+ arg.n_heads,
+ arg.e_layers,
+ arg.d_layers,
+ arg.d_ff,
+ arg.factor,
+ arg.embed,
+ arg.distil,
+ arg.des,
+ arg.class_strategy, ii)
+
+ exp = Exp(arg) # set experiments
+ print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
+ exp.train(setting)
+
+ print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
+
+        train_losses = exp.train_losses  # ---> use these to plot the loss values
+        test_losses = exp.test_losses  # ---> use these to plot the loss values
+
+ exp.test(setting)
+
+ if arg.do_predict:
+ print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
+ exp.predict(setting, True)
+
+ torch.cuda.empty_cache()
else:
ii = 0
setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
- args.model_id,
- args.model,
- args.data,
- args.features,
- args.seq_len,
- args.label_len,
- args.pred_len,
- args.d_model,
- args.n_heads,
- args.e_layers,
- args.d_layers,
- args.d_ff,
- args.factor,
- args.embed,
- args.distil,
- args.des,
- args.class_strategy, ii)
-
- exp = Exp(args) # set experiments
+ arg.model_id,
+ arg.model,
+ arg.data,
+ arg.features,
+ arg.seq_len,
+ arg.label_len,
+ arg.pred_len,
+ arg.d_model,
+ arg.n_heads,
+ arg.e_layers,
+ arg.d_layers,
+ arg.d_ff,
+ arg.factor,
+ arg.embed,
+ arg.distil,
+ arg.des,
+ arg.class_strategy, ii)
+
+ exp = Exp(arg) # set experiments
print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
exp.test(setting, test=1)
torch.cuda.empty_cache()
+
+#end#
diff --git a/test_results/DONOTREMOVE b/test_results/DONOTREMOVE
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/criter.py b/utils/criter.py
new file mode 100644
index 000000000..0e16662c1
--- /dev/null
+++ b/utils/criter.py
@@ -0,0 +1,105 @@
+# # in The Name of GOD # #
+#
+# # These are some simple and straightforward classes # #
+# to interact with our losses functions #
+# cloner174.org@gmail.com
+#
+import torch
+import torch.nn as nn
+
+
+
+class WeightedMeanAbsolutePercentageError(nn.Module):
+
+ def __init__(self):
+
+ super(WeightedMeanAbsolutePercentageError, self).__init__()
+
+
+ def forward(self, y_pred, y_true):
+
+ absolute_percentage_errors = torch.abs((y_true - y_pred) / (y_true + 1e-8))
+ weighted_errors = absolute_percentage_errors * (torch.abs(y_true) + 1e-8)
+
+ return torch.mean(weighted_errors)
+
+
+
+class SymmetricMeanAbsolutePercentageError(nn.Module):
+
+ def __init__(self):
+
+ super(SymmetricMeanAbsolutePercentageError, self).__init__()
+
+ def forward(self, y_pred, y_true):
+
+ absolute_percentage_errors = torch.abs((y_true - y_pred) / ((torch.abs(y_true) + torch.abs(y_pred)) / 2 + 1e-8))
+
+ return torch.mean(absolute_percentage_errors)
+
+
+
+class RMSELoss(nn.Module):
+
+ def __init__(self):
+
+ super(RMSELoss, self).__init__()
+
+
+ def forward(self, y_pred, y_true):
+
+ return torch.sqrt(torch.mean((y_pred - y_true) ** 2))
+
+
+
+class QuantileLoss(nn.Module):
+
+ def __init__(self, quantile=0.5):
+
+ super(QuantileLoss, self).__init__()
+ self.quantile = quantile
+
+
+ def forward(self, y_pred, y_true):
+
+ errors = y_true - y_pred
+ quantile_loss = torch.max((self.quantile - 1) * errors, self.quantile * errors)
+
+ return torch.mean(quantile_loss)
+
+
+
+class HuberLoss(nn.Module):
+
+ def __init__(self, delta=1.0):
+
+ super(HuberLoss, self).__init__()
+ self.delta = delta
+
+
+ def forward(self, y_pred, y_true):
+
+ errors = torch.abs(y_pred - y_true)
+        quadratic = torch.clamp(errors, max=self.delta)  # torch.min needs a tensor 'other'; clamp accepts the float delta
+ linear = errors - quadratic
+
+ return torch.mean(0.5 * quadratic ** 2 + self.delta * linear)
+
+
+
+class PinballLoss(nn.Module):
+
+ def __init__(self, tau=0.5):
+
+ super(PinballLoss, self).__init__()
+ self.tau = tau
+
+
+ def forward(self, y_pred, y_true):
+
+ delta = y_pred - y_true
+ loss = torch.max((self.tau - 1) * delta, self.tau * delta)
+
+ return torch.mean(loss)
+
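+# Minimal usage sketch (assuming y_pred / y_true are same-shape float tensors):
+#   criterion = HuberLoss(delta=1.0)
+#   loss = criterion(y_pred, y_true)
+#   loss.backward()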
+#
diff --git a/utils/tools.py b/utils/tools.py
index c3b922ec7..e0c32d25e 100644
--- a/utils/tools.py
+++ b/utils/tools.py
@@ -1,10 +1,6 @@
-import os
-
-import numpy as np
import torch
+import numpy as np
import matplotlib.pyplot as plt
-import pandas as pd
-
plt.switch_backend('agg')
@@ -112,4 +108,4 @@ def adjustment(gt, pred):
def cal_accuracy(y_pred, y_true):
- return np.mean(y_pred == y_true)
+ return np.mean(y_pred == y_true)
\ No newline at end of file