trainer.py
import argparse
import logging
import os
import random
import sys
import time

import numpy as np
import torch
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

from datasets.dataset_synapse import Synapse_dataset, RandomGenerator
from test_func_TADE import inference
from loss import prediction2label, ordinal_regression, ordinal_regression_focal
from tools import get_error_name
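
# For reference, a minimal sketch of what the imported ordinal-regression
# focal loss could look like. The real implementation lives in loss.py (not
# shown on this page); the function below is an illustrative assumption with
# a hypothetical name, not the project's code. It treats an ordinal label k
# as cumulative binary targets [1]*k + [0]*(K-1-k) and adds a focal weight.
def _ordinal_regression_focal_sketch(logits, targets, gamma=2.0):
    # logits: (N, K-1) threshold scores; targets: (N,) integer labels in [0, K-1].
    cum_targets = torch.zeros_like(logits)
    for i, t in enumerate(targets):
        cum_targets[i, :int(t)] = 1.0
    probs = torch.sigmoid(logits)
    # p_t is the probability assigned to the correct side of each threshold.
    p_t = probs * cum_targets + (1.0 - probs) * (1.0 - cum_targets)
    bce = F.binary_cross_entropy_with_logits(logits, cum_targets, reduction="none")
    # The focal term down-weights thresholds the model already predicts well.
    return ((1.0 - p_t) ** gamma * bce).mean()
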
def trainer_synapse(args, model, snapshot_path):
    logging.basicConfig(filename=os.path.join(snapshot_path, "log.txt"), level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size * args.n_gpu

    db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir,
                               split=args.train_txt, is_train=True,
                               transform=transforms.Compose(
                                   [RandomGenerator(output_size=[args.img_size, args.img_size])]))
    print("The length of train set is: {}".format(len(db_train)))

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)
    class PadSequence:
        """Collate studies with different slice counts into one padded batch."""

        def __call__(self, batch):
            # Sort by sequence length (number of slices), longest first.
            sorted_batch = sorted(batch, key=lambda x: x['image'].shape[0], reverse=True)
            sequences = [torch.from_numpy(x['image']) for x in sorted_batch]
            # Zero-pad shorter sequences up to the longest one in the batch.
            sequences_padded = torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
            labels = torch.tensor(np.array([x['label'] for x in sorted_batch]))
            return sequences_padded, labels

    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True,
                             collate_fn=PadSequence(), worker_init_fn=worker_init_fn)
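    # Illustrative shapes (assumed, for orientation only): with batch_size=2 and
    # per-study slice counts of 12 and 8, PadSequence returns sequences_padded
    # of shape (2, 12, ...) and labels of shape (2,).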

    # DataParallel replicates the model across the visible GPUs; its parameters
    # must live on the default CUDA device.
    model = torch.nn.DataParallel(model).cuda()
    model.train()

    optimizer = torch.optim.Adam(model.module.parameters(), lr=base_lr, weight_decay=1e-4)
    error_name = get_error_name(args.list_dir + args.val_txt + '.txt')
    writer = SummaryWriter(os.path.join(snapshot_path, 'log'))

    iter_num = 0
    max_epoch = args.max_epochs
    max_iterations = max_epoch * len(trainloader)
    logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))

    # Resume from args.model_step if a previous run was interrupted.
    iterator = tqdm(range(args.model_step, max_epoch + 1), ncols=70)
    for epoch_num in iterator:
        # Reset the per-case error bookkeeping at the start of every epoch.
        error_name = get_error_name(args.list_dir + args.val_txt + '.txt')

        for i_batch, sampled_batch in enumerate(trainloader):
            image_batch, label_batch = sampled_batch[0].cuda(), sampled_batch[1].cuda()

            output = model(image_batch)  # ordinal logits, one score per threshold
            loss = ordinal_regression_focal(output, label_batch)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Polynomial learning-rate decay: lr = base_lr * (1 - t / T) ** 0.9,
            # e.g. halfway through training the rate is about 0.54 * base_lr.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_

            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss.item(), iter_num)
            logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))

        torch.cuda.empty_cache()
        time.sleep(10)  # brief pause between epochs before validation
        # Validate after every epoch and log the per-class AUCs.
        auc0, auc1, auc2, auc3, error_name, result_, Y_val_set = inference(args, model, error_name, epoch_num)
        logging.info('epoch %d : auc0 : %f : auc1 : %f : auc2 : %f : auc3 : %f'
                     % (epoch_num, auc0, auc1, auc2, auc3))
        model.train()  # restore training mode after validation
        print(args.model_path)

        # Save a checkpoint and the raw validation predictions every epoch.
        save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
        save_result_path = os.path.join(snapshot_path, 'result_' + str(epoch_num) + '.npy')
        np.save(save_result_path, {'result': result_, 'label': Y_val_set})
        torch.save(model.module.state_dict(), save_mode_path)
        logging.info("save model to {}".format(save_mode_path))

        if epoch_num >= max_epoch - 1:
            iterator.close()
            break
    # Persist the per-case error record for the validation split.
    np.save('./results/error_name/' + args.val_txt + '.npy', error_name)
    with open('./results/error_name/' + args.val_txt + '.txt', 'a') as fp:
        for num_name in error_name.keys():
            fp.write(num_name + ':' + str(error_name[num_name]) + '\n')

    writer.close()
    return "Training Finished!"