train.py (218 lines, 7.37 KB)
import torch
import torch.nn as nn
from dataclasses import dataclass
import os
import pickle
import math
import matplotlib.pyplot as plt
from tqdm import tqdm
from components.model import GPT, GPTConfig, CPU_Unpickler
from components.tokenizer import Tokenizer
from components.dataloader import TinyShakespere, WikiData, TinyTextBook, OpenOrca
from argparse import ArgumentParser

# Argument parsing
parser = ArgumentParser()
parser.add_argument("--init_weight", type=str, default=None)
parser.add_argument("--fix_lr", type=bool, default=True)
parser.add_argument("--dataset", type=str, default="tinytextbook")
args, leftovers = parser.parse_known_args()
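# Example invocations (illustrative only; dataset names must match the branches
# in the __main__ block below, and --init_weight names a directory under ./logs):
#   python train.py --dataset tinytextbook
#   python train.py --dataset openorca --init_weight <previous_run_dir>
# Caveat: argparse's type=bool converts any non-empty string to True, so
# "--fix_lr False" still enables the fixed learning rate; pass --fix_lr ""
# to fall back to the cosine schedule.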

@dataclass
class TrainConfig:
    batch_size: int = 8
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    warmup_iters = 2000
    learning_rate = 6e-4
    lr_decay_iters = 600000
    min_lr = 6e-5
    weight_decay = 1e-1
    beta1 = 0.9
    beta2 = 0.95
    label_smoothing = 0.0
    fixed_learning_rate = 3e-5 if args.fix_lr else None  # 3e-5
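# Note: in a @dataclass only the annotated names (batch_size, device) become
# fields; the remaining settings are plain class attributes, and the code below
# reads everything as TrainConfig.<name> without ever instantiating the class.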


def get_lr(it):
    # 1) linear warmup for warmup_iters steps
    if it < TrainConfig.warmup_iters:
        return TrainConfig.learning_rate * it / TrainConfig.warmup_iters
    # 2) if it > lr_decay_iters, return min learning rate
    if it > TrainConfig.lr_decay_iters:
        return TrainConfig.min_lr
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - TrainConfig.warmup_iters) / (
        TrainConfig.lr_decay_iters - TrainConfig.warmup_iters
    )
    assert 0 <= decay_ratio <= 1
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
    return TrainConfig.min_lr + coeff * (TrainConfig.learning_rate - TrainConfig.min_lr)
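# Rough shape of the schedule with the defaults above
# (warmup_iters=2000, learning_rate=6e-4, lr_decay_iters=600000, min_lr=6e-5):
#   get_lr(0)      -> 0.0    start of linear warmup
#   get_lr(1000)   -> 3e-4   halfway through warmup
#   get_lr(2000)   -> 6e-4   peak rate, cosine decay begins
#   get_lr(600000) -> 6e-5   fully decayed; stays at min_lr afterwards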


def train_fn(
    model: nn.Module,
    epoch: int,
    optimizer: torch.optim.Optimizer,
    savepath: str = None,
):
    best_loss = float("inf")
    iter_num = 0
    train_phases = ["train", "val"]
    losses = {phase: [] for phase in train_phases}
    if savepath and os.path.exists(savepath):
        # Try loading the model and weight
        try:
            with open(savepath, "rb") as filehandler:
                prev_train = CPU_Unpickler(filehandler).load()
            best_weight = prev_train["best_weight"]
            model.load_state_dict(best_weight, strict=False)
            losses = prev_train["losses"]
            best_loss = prev_train["best_loss"]
            optimizer = prev_train["optimizer"]
            iter_num = prev_train["iter_num"]
            print(f"Loaded model with loss: {best_loss:0.4f}")
        except Exception as e:
            print(f"Could not load from path: {savepath}\n", repr(e))

    for e in range(epoch):
        for phase in train_phases:
            is_training = phase == "train"
            model.train() if is_training else model.eval()
            loss, dats = 0.0, 0.0
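            # Each "epoch" runs a fixed 500 mini-batches per phase rather than
            # a full pass over the dataset.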
            tqdm_prog = tqdm(range(500))
            for _ in tqdm_prog:
                x, y = data.get_batch(phase, TrainConfig.batch_size, TrainConfig.device)
                with torch.set_grad_enabled(is_training):
                    _, batch_loss = model.forwardV2(x, y)
                if is_training:
                    batch_loss.backward()
                    optimizer.step()
                    optimizer.zero_grad(set_to_none=True)
                    # LR scheduler: only update lr when it is not fixed
                    if TrainConfig.fixed_learning_rate is None:
                        lr = get_lr(iter_num)
                        for param_group in optimizer.param_groups:
                            param_group["lr"] = lr
                    else:
                        lr = TrainConfig.fixed_learning_rate
                    iter_num += 1
                # Stats
                dats += x.size(0)
                loss += batch_loss.item() * x.size(0)
                tqdm_prog.set_description(
                    f"Epoch {e+1} [{phase.upper()}]: Loss: {loss/dats:.4f}, lr: {lr:0.6f}"
                )
            epoch_loss = loss / dats
            losses[phase].append(epoch_loss)

        # Save training state
        if losses["val"][-1] < best_loss:
            best_loss = losses["val"][-1]
            print(f"Best loss found: {best_loss:3.4f}")
            if savepath:
                with open(savepath, "wb") as filehandler:
                    pickle.dump(
                        {
                            "best_weight": model.state_dict(),
                            "best_loss": best_loss,
                            "losses": losses,
                            "optimizer": optimizer,
                            "iter_num": iter_num,
                        },
                        filehandler,
                    )
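        # Note: the checkpoint pickles the optimizer object (and any CUDA
        # tensors it holds) directly; CPU_Unpickler, used above and in
        # __main__, is presumably what lets the file be reloaded on a
        # CPU-only machine.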
        # Inference test
        model.eval()
        x = torch.tensor(
            tokenizer.encode("He is a"), dtype=torch.int, device=TrainConfig.device
        ).unsqueeze(0)
        print(
            "Inference:",
            "".join(
                tokenizer.decode(
                    model.generate(x, max_new_tokens=256, temperature=0.5)
                    .detach()[0]
                    .tolist()
                )
            ),
        )
        # Plot train/val loss curves
        eps = list(range(1, len(losses["train"]) + 1))
        fig = plt.figure()
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.plot(
            eps, losses["train"], label="train", linestyle="dashed", color="tab:red"
        )
        plt.plot(eps, losses["val"], label="val", color="tab:red")
        plt.legend(loc="upper left")
        plt.tight_layout()  # otherwise the right y-label is slightly clipped
        plt.grid()
        plt.savefig(os.path.join(os.path.dirname(savepath), "log.jpg"))
        plt.close(fig)

    print(f"Best loss: {best_loss:3.4f}")


if __name__ == "__main__":
    # Load tokenizer and dataset
    tokenizer = Tokenizer()
    print(f"Training on {args.dataset} dataset")
    if args.dataset.lower() == "wikidata":
        data = WikiData(tokenizer, context_len=GPTConfig.context_len * 10)
    elif args.dataset.lower() == "tinyshakespere":
        data = TinyShakespere(tokenizer)
    elif args.dataset.lower() == "tinytextbook":
        data = TinyTextBook(tokenizer, context_len=GPTConfig.context_len * 10)
    elif args.dataset.lower() == "openorca":
        data = OpenOrca(tokenizer, context_len=GPTConfig.context_len * 10)
    else:
        raise ValueError(f"Invalid dataset name {args.dataset}")
    # Model initialization
    model = GPT(GPTConfig).to(TrainConfig.device)
    # Load initial weights if requested
    if args.init_weight is not None:
        init_path = os.path.join(".", "logs", args.init_weight, "log.pkl")
        with open(init_path, "rb") as filehandler:
            prev_train = CPU_Unpickler(filehandler).load()
        model.load_state_dict(prev_train["best_weight"], strict=False)
        print("Weight loaded from", args.init_weight)
    # Optimizer
    optimizer = model.configure_optimizers(
        TrainConfig.weight_decay,
        TrainConfig.fixed_learning_rate
        if TrainConfig.fixed_learning_rate
        else TrainConfig.learning_rate,
        (TrainConfig.beta1, TrainConfig.beta2),
        TrainConfig.device,
    )
    # Training
    train_fn(
        model,
        300,
        optimizer,
        os.path.join(".", "logs", args.dataset.lower(), "log.pkl"),
    )
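
# A minimal sketch (kept as a comment so it does not run on import) of how the
# saved log.pkl could be reloaded for standalone sampling. The "tinytextbook"
# run directory and the "Once upon a time" prompt are illustrative assumptions;
# GPT, GPTConfig, CPU_Unpickler, Tokenizer and model.generate come from this file.
#
#   with open(os.path.join(".", "logs", "tinytextbook", "log.pkl"), "rb") as fh:
#       ckpt = CPU_Unpickler(fh).load()
#   model = GPT(GPTConfig).to(TrainConfig.device)
#   model.load_state_dict(ckpt["best_weight"], strict=False)
#   model.eval()
#   tokenizer = Tokenizer()
#   prompt = torch.tensor(
#       tokenizer.encode("Once upon a time"), dtype=torch.int, device=TrainConfig.device
#   ).unsqueeze(0)
#   out = model.generate(prompt, max_new_tokens=128, temperature=0.5)
#   print("".join(tokenizer.decode(out.detach()[0].tolist())))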