-
Notifications
You must be signed in to change notification settings - Fork 24
/
main.py
132 lines (118 loc) · 5.88 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
from data_processing import data_process_utils
from loggers import logger
from optimizers.executor import Executor
from simulator import market_sim, post_trading_analysis
from utils import (
data_split,
load_yaml,
save_dataset_info,
parse_args,
create_hyperparameters_yaml,
)
from data_processing.complete_homological_utils import get_complete_homology
if __name__ == "__main__":
    # Entry point: runs the pipeline stages (data processing, torch dataset
    # preparation, homology preparation, training, evaluation, backtest,
    # post-trading analysis) selected in the experiment's hyperparameters.
    # Parse input arguments.
    args = parse_args()
    # Tracks whether the training stage raised, so that evaluation is skipped
    # on a failed run instead of testing a half-trained model.
    wb_error_detection = False
    if args.experiment_id is None:
        # If no experiment ID is passed, generate a new one.
        experiment_id = logger.generate_id(args.model, args.target_stocks)
        # Create a new configuration file containing the hyperparameters.
        create_hyperparameters_yaml(experiment_id, args)
    else:
        # If an experiment ID is passed, reuse it and the hyperparameters
        # file already saved under its experiment folder.
        experiment_id = args.experiment_id
    # Path of the configuration file containing the hyperparameters.
    hyperparameters_path = (
        f"{logger.find_save_path(experiment_id)}/hyperparameters.yaml"
    )
    # Load the configuration file (general hyperparameters).
    general_hyperparameters = load_yaml(hyperparameters_path, "general")
    # Load the configuration file (model's hyperparameters).
    model_hyperparameters = load_yaml(hyperparameters_path, "model")
    # Load the configuration file (trading hyperparameters).
    trading_hyperparameters = load_yaml(hyperparameters_path, "trading")
    if args.experiment_id is not None:
        # When resuming an existing experiment, the stages come from the CLI
        # rather than the saved YAML.
        # NOTE(review): assumes args.stages is a non-None comma-separated
        # string in this branch — confirm parse_args enforces that.
        general_hyperparameters["stages"] = args.stages.split(",")
    # Handle the data processing stage.
    if "data_processing" in general_hyperparameters["stages"]:
        # Make the list of training stocks a set to avoid duplicates.
        training_stocks = set(general_hyperparameters["training_stocks"])
        # Make the list of target stocks a set to avoid duplicates.
        target_stocks = set(general_hyperparameters["target_stocks"])
        # Iterate over the union of the two sets (a stock can occur both in
        # training_stocks and target_stocks; each is processed once).
        for stock in training_stocks.union(target_stocks):
            data_utils = data_process_utils.DataUtils(
                ticker=stock,
                dataset=general_hyperparameters["dataset"],
                experiment_id=experiment_id,
                horizons=general_hyperparameters["horizons"],
                normalization_window=general_hyperparameters["normalization_window"],
            )
            # Generate the data folders.
            data_utils.generate_data_folders()
            # Transform the data.
            data_utils.process_data()
        # Split the data into training, validation and test sets.
        data_split(
            dataset=general_hyperparameters["dataset"],
            training_stocks=general_hyperparameters["training_stocks"],
            target_stock=general_hyperparameters["target_stocks"],
            training_ratio=general_hyperparameters["training_ratio"],
            validation_ratio=general_hyperparameters["validation_ratio"],
            include_target_stock_in_training=general_hyperparameters[
                "include_target_stock_in_training"
            ],
        )
    # Instantiate the executor as None; the stages below create it on demand.
    executor = None
    # For the 'torch_dataset_preparation' stage, instantiate the executor with
    # proper arguments.
    if "torch_dataset_preparation" in general_hyperparameters["stages"]:
        executor = Executor(
            experiment_id,
            general_hyperparameters,
            model_hyperparameters,
            torch_dataset_preparation=True,
        )
    if "torch_dataset_preparation_backtest" in general_hyperparameters["stages"]:
        executor = Executor(
            experiment_id,
            general_hyperparameters,
            model_hyperparameters,
            torch_dataset_preparation=False,
            torch_dataset_preparation_backtest=True,
        )
    if "complete_homological_structures_preparation" in general_hyperparameters["stages"]:
        get_complete_homology(
            general_hyperparameters=general_hyperparameters,
            model_hyperparameters=model_hyperparameters,
        )
    # For the 'training' and 'evaluation' stages, instantiate the executor
    # with proper arguments.
    if (
        "training" in general_hyperparameters["stages"]
        or "evaluation" in general_hyperparameters["stages"]
    ):
        executor = Executor(
            experiment_id, general_hyperparameters, model_hyperparameters
        )
    if "training" in general_hyperparameters["stages"]:
        try:
            # Keep track of the files used in the training, validation and
            # test sets.
            save_dataset_info(
                experiment_id=experiment_id,
                general_hyperparameters=general_hyperparameters,
            )
            # Train the model.
            executor.execute_training()
            # Clean up the experiment folder from wandb logging files.
            executor.logger_clean_up()
        except Exception as exc:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; report what failed instead of discarding it.
            print(f"Exception detected: {exc!r}")
            wb_error_detection = True
    if "evaluation" in general_hyperparameters["stages"] and wb_error_detection is False:
        # Out-of-sample test of the model (skipped if training failed).
        executor.execute_testing()
        # Clean up the experiment folder from wandb logging files.
        executor.logger_clean_up()
    if "backtest" in general_hyperparameters["stages"]:
        # Backtest the model.
        market_sim.backtest(
            experiment_id=experiment_id,
            trading_hyperparameters=trading_hyperparameters,
        )
    if "post_trading_analysis" in general_hyperparameters["stages"]:
        # Perform a post-trading analysis.
        post_trading_analysis.post_trading_analysis(
            experiment_id=experiment_id,
            general_hyperparameters=general_hyperparameters,
            trading_hyperparameters=trading_hyperparameters,
            model_hyperparameters=model_hyperparameters,
        )