forked from mehedihasanbijoy/DPCSpell
-
Notifications
You must be signed in to change notification settings - Fork 0
/
pipeline.py
126 lines (94 loc) · 3.91 KB
/
pipeline.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import torch
import torch.nn.functional as F
from tqdm import tqdm
from utils import basic_tokenizer
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.font_manager as fm
import warnings as wrn
wrn.filterwarnings('ignore')
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch and return the mean per-batch loss.

    Expects batches exposing ``.src`` and ``.trg`` index tensors of shape
    [batch, seq len] (batch-first) — TODO confirm against the data loader.
    """
    model.train()
    total_loss = 0
    for batch in tqdm(iterator):
        src, trg = batch.src, batch.trg
        optimizer.zero_grad()
        # Teacher forcing: decoder input is the target without its final
        # token, so each position predicts the *next* target token.
        output, _ = model(src, trg[:, :-1])
        # Flatten [batch, trg len - 1, vocab] -> [batch*(trg len - 1), vocab]
        vocab_size = output.shape[-1]
        flat_output = output.contiguous().view(-1, vocab_size)
        # Labels: drop the leading <sos> so they align with the logits.
        flat_trg = trg[:, 1:].contiguous().view(-1)
        loss = criterion(flat_output, flat_trg)
        loss.backward()
        # Gradient clipping keeps transformer updates numerically stable.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(iterator)
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss over *iterator* without weight updates."""
    model.eval()
    total_loss = 0
    # No gradients needed for evaluation; saves memory and time.
    with torch.no_grad():
        for batch in tqdm(iterator):
            src, trg = batch.src, batch.trg
            # Same shifted-target scheme as train(): decoder sees trg[:, :-1],
            # labels are trg[:, 1:], both flattened for the criterion.
            output, _ = model(src, trg[:, :-1])
            vocab_size = output.shape[-1]
            flat_output = output.contiguous().view(-1, vocab_size)
            flat_trg = trg[:, 1:].contiguous().view(-1)
            total_loss += criterion(flat_output, flat_trg).item()
    return total_loss / len(iterator)
def translate_sentence(sentence, src_field, trg_field, model, device, max_len=50):
    """Greedy-decode *sentence* with a trained encoder/decoder model.

    Accepts either a raw string (tokenised via ``basic_tokenizer``) or a
    pre-tokenised list. Returns ``(tokens, attention)`` where ``tokens``
    excludes the leading <sos> and the final generated token (normally <eos>).
    """
    model.eval()
    tokens = basic_tokenizer(sentence) if isinstance(sentence, str) else sentence
    tokens = [src_field.init_token, *tokens, src_field.eos_token]
    indexes = [src_field.vocab.stoi[tok] for tok in tokens]
    src_tensor = torch.LongTensor(indexes).unsqueeze(0).to(device)
    src_mask = model.make_src_mask(src_tensor)
    # Encode once; the encoder output is reused at every decoding step.
    with torch.no_grad():
        enc_src = model.encoder(src_tensor, src_mask)
    eos_idx = trg_field.vocab.stoi[trg_field.eos_token]
    out_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    for _ in range(max_len):
        trg_tensor = torch.LongTensor(out_indexes).unsqueeze(0).to(device)
        trg_mask = model.make_trg_mask(trg_tensor)
        with torch.no_grad():
            output, attention = model.decoder(trg_tensor, enc_src, trg_mask, src_mask)
        # Greedy choice: highest-scoring token at the last position.
        next_token = output.argmax(2)[:, -1].item()
        out_indexes.append(next_token)
        if next_token == eos_idx:
            break
    decoded = [trg_field.vocab.itos[idx] for idx in out_indexes]
    return decoded[1:-1], attention
def display_attention(sentence, translation, attention, n_heads=8, n_rows=4, n_cols=2):
    """Plot each attention head as a source-vs-prediction heat map.

    *attention* is squeezed on dim 0 and indexed per head — assumes shape
    [1, n_heads, trg len, src len]; TODO confirm against the decoder output.
    """
    assert n_rows * n_cols == n_heads
    # Bangla glyphs require an explicit font file bundled with the dataset.
    bangla_font = fm.FontProperties(fname='./Dataset/kalpurush.ttf')
    fig = plt.figure(figsize=(15, 25))
    for head in range(n_heads):
        axis = fig.add_subplot(n_rows, n_cols, head + 1)
        head_weights = attention.squeeze(0)[head].cpu().detach().numpy()
        axis.matshow(head_weights, cmap='bone')
        axis.tick_params(labelsize=12)
        # Leading '' offsets labels past matshow's default first tick.
        axis.set_xticklabels(
            ['', '<sos>', *sentence, '<eos>'],
            rotation=45, fontproperties=bangla_font, fontsize=20
        )
        axis.set_yticklabels([''] + translation, fontproperties=bangla_font, fontsize=20)
        axis.xaxis.set_major_locator(ticker.MultipleLocator(1))
        axis.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
    plt.close()
if __name__ == '__main__':
    # Intentionally a no-op: this module only provides train/evaluate/translate
    # helpers and has no standalone entry-point behaviour.
    pass