-
Notifications
You must be signed in to change notification settings - Fork 0
/
config.py
95 lines (80 loc) · 5.66 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# ==============================================================================
# File description: Configuration of the dataset, model, training, and validation code.
# ==============================================================================
import os
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from model import ContentLoss
from model import Discriminator
from model import Generator
# ==============================================================================
# Common configure
# ==============================================================================
torch.manual_seed(0)  # Fix the random seed for reproducibility.
upscale_factor = 4  # Ratio of high-resolution to low-resolution image size in the dataset.
# Fall back to CPU when no GPU is present so this config can be imported on any machine;
# the original hard-coded "cuda:0" and crashed on `.to(device)` without CUDA.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True  # Enable when input shapes are mostly fixed; disable for highly variable shapes.
mode = "train"  # Run mode ("train" or "valid"). Each mode loads its own variables below.
exp_name = "exp013"  # Experiment name; used to build sample/result/log directory paths.
# ==============================================================================
# Train configure
# ==============================================================================
if mode == "train":
    # Configure dataset.
    train_dir = "data/DIV2K/train"  # Directory of the training dataset.
    valid_dir = "data/DIV2K/valid"  # Directory of the validation dataset.
    image_size = 128  # High-resolution image size in the training dataset.
    batch_size = 16  # Data batch size.
    # Configure model.
    discriminator = Discriminator().to(device)  # Load the discriminator model.
    generator = Generator().to(device)  # Load the generator model.
    # Resume training.
    start_p_epoch = 450  # Initial epoch of the generator pre-training phase. 0 means training from scratch.
    start_epoch = 78  # Initial epoch of the adversarial training phase. 0 means training from scratch.
    resume = True  # Set to `True` to continue training from the previous training progress.
    resume_p_weight = ""  # Generator weights to restore for the generator pre-training phase.
    resume_d_weight = "/home/ubuntu/esrgan/results/exp013/d-best.pth"  # Discriminator weights to restore for the adversarial training phase.
    resume_g_weight = "/home/ubuntu/esrgan/results/exp013/g-best.pth"  # Generator weights to restore for the adversarial training phase.
    # Train epochs.
    p_epochs = 450  # Total number of epochs of the generator pre-training phase.
    epochs = 400  # Total number of epochs of the adversarial training phase.
    # Loss function.
    psnr_criterion = nn.MSELoss().to(device)  # MSE used to compute the PSNR metric.
    pixel_criterion = nn.L1Loss().to(device)  # Pixel loss.
    content_criterion = ContentLoss().to(device)  # Content loss.
    adversarial_criterion = nn.BCEWithLogitsLoss().to(device)  # Adversarial loss.
    # Perceptual loss function weights (pixel + content + adversarial terms).
    pixel_weight = 0.01
    content_weight = 1.0
    adversarial_weight = 0.005
    # Optimizer.
    p_optimizer = optim.Adam(generator.parameters(), 0.0002, (0.9, 0.999))  # Generator optimizer for the pre-training phase.
    d_optimizer = optim.SGD(discriminator.parameters(), 0.0001, 0.9)  # Discriminator optimizer for the adversarial phase. NOTE(review): SGD here vs Adam for the generator — confirm the asymmetry is intentional.
    g_optimizer = optim.Adam(generator.parameters(), 0.0001, (0.9, 0.999))  # Generator optimizer for the adversarial phase.
    # Scheduler.
    milestones = [epochs * 0.125, epochs * 0.250, epochs * 0.500, epochs * 0.750]  # LR decay points at 1/8, 1/4, 1/2, 3/4 of the adversarial phase.
    p_scheduler = CosineAnnealingLR(p_optimizer, p_epochs // 4, 1e-7)  # Cosine-annealing schedule for the generator during pre-training.
    d_scheduler = MultiStepLR(d_optimizer, list(map(int, milestones)), 0.5)  # Halve discriminator LR at each milestone.
    g_scheduler = MultiStepLR(g_optimizer, list(map(int, milestones)), 0.5)  # Halve generator LR at each milestone.
    # Training log (TensorBoard).
    writer = SummaryWriter(os.path.join("samples", "logs", exp_name))
    # Additional variables.
    exp_dir1 = os.path.join("samples", exp_name)  # Directory for intermediate sample outputs.
    exp_dir2 = os.path.join("results", exp_name)  # Directory for saved model weights/results.
# ==============================================================================
# Validate configure
# ==============================================================================
if mode == "valid":
    # Additional variables.
    exp_dir = os.path.join("results", "test", exp_name)  # Directory for validation outputs of this experiment.
    # Load model.
    model = Generator().to(device)  # Generator used to super-resolve the test images.
    model_path = f"results/{exp_name}/g-best.pth"  # Best generator checkpoint produced by training.
    # Test data addresses. (Dropped pointless f-string prefixes on the constant paths.)
    lr_dir = "data/DIV2K/test/LR"  # Low-resolution input images.
    sr_dir = f"results/test/{exp_name}"  # Super-resolved output images.
    hr_dir = "data/DIV2K/test/HR"  # High-resolution ground-truth images.