import numpy as np
import torch


# Straight-through estimator (STE): quantize in the forward pass, pass the
# (masked and clipped) gradient straight through in the backward pass.
class STEQuantize(torch.autograd.Function):
    # Typically called with self.args.fb_quantize_limit, self.args.fb_quantize_level.
    @staticmethod
    def forward(ctx, inputs, quant_limit, quant_level):
        ctx.save_for_backward(inputs)
        x_lim_abs = quant_limit
        x_lim_range = 2.0 * x_lim_abs
        # Clip inputs to [-quant_limit, +quant_limit] before quantizing.
        x_input_norm = torch.clamp(inputs, -x_lim_abs, x_lim_abs)
        if quant_level == 2:
            # Binary quantization: only the sign survives.
            outputs_int = torch.sign(x_input_norm)
        else:
            # Uniform quantization to quant_level evenly spaced values
            # in [-quant_limit, +quant_limit].
            outputs_int = (
                torch.round((x_input_norm + x_lim_abs) * ((quant_level - 1.0) / x_lim_range))
                * x_lim_range / (quant_level - 1.0)
                - x_lim_abs
            )
        return outputs_int

    @staticmethod
    def backward(ctx, grad_output):
        inputs, = ctx.saved_tensors
        # Straight-through estimator: copy the incoming gradient, zero it
        # where the input fell outside [-1, 1], and clip its magnitude to
        # 0.25. Clone first so grad_output itself is never mutated in place.
        # Alternative masks that were tried:
        # grad_input[torch.abs(inputs) > 1.5] = 0
        # grad_input[torch.abs(inputs) < 0.5] = 0
        grad_input = grad_output.clone()
        grad_input[inputs > 1.0] = 0
        grad_input[inputs < -1.0] = 0
        grad_input = torch.clamp(grad_input, -0.25, +0.25)
        # forward() took three inputs after ctx, so backward must return
        # exactly three gradients; the two hyperparameters get None.
        return grad_input, None, None
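
# Minimal usage sketch (illustrative; the limit 1.0 and level count 4 are
# assumed values, not taken from the original file). As a
# torch.autograd.Function, STEQuantize is invoked through .apply():
def _demo_ste_quantize():
    x = torch.randn(8, requires_grad=True)
    y = STEQuantize.apply(x, 1.0, 4)  # 4 evenly spaced levels in [-1.0, 1.0]
    y.sum().backward()                # STE passes the gradient through to x
    print(x.grad)
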
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience (int): How long to wait after the last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                           Default: 0
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.inf  # np.Inf was removed in NumPy 2.0
        self.delta = delta

    def __call__(self, val_loss, model):
        # Track the negated loss so that "higher is better".
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves the model when validation loss decreases.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), 'checkpoint.pt')
        self.val_loss_min = val_loss
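
# Minimal training-loop sketch (illustrative; model, loaders, criterion, and
# optimizer are hypothetical placeholders, not from the original file) showing
# how EarlyStopping is meant to be driven by the validation loss:
def _demo_early_stopping(model, train_loader, val_loader, criterion, optimizer):
    stopper = EarlyStopping(patience=7, verbose=True)
    for epoch in range(100):
        model.train()
        for xb, yb in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            val_loss = sum(criterion(model(xb), yb).item() for xb, yb in val_loader)
        stopper(val_loss, model)  # saves checkpoint.pt on improvement
        if stopper.early_stop:
            break
    # Reload the best weights saved by save_checkpoint().
    model.load_state_dict(torch.load('checkpoint.pt'))
    return model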