"""
Paper: "Fast and Accurate Image Super Resolution by Deep CNN with Skip Connection and Network in Network"
Ver: 2.0
Functions for evaluating model performance
Put your images under data/[your dataset name]/ and specify [your dataset name] for --test_dataset.
This script will create LR images from your test dataset and evaluate the model's performance.
--save_results=True: will provide generated HR images and bi-cubic HR images.
see output/[model_name]/data/[your test data]/ for checking result images.
Also you must put same model args as you trained.
For ex, if you trained like below,
> python train.py --scale=3
Then you must run evaluate.py like below.
> python evaluate.py --scale=3 --file=your_image_file_path
If you trained like below,
> python train.py --dataset=bsd200 --layers=8 --filters=96 --training_images=30000
Then you must run evaluate.py like below.
> python evaluate.py --layers=8 --filters=96 --file=your_image_file_path
"""
import logging
import time

import tensorflow.compat.v1 as tf

# The DCSCN code is written against the TF1 graph/session API, so eager
# execution is disabled when running under TensorFlow 2.
tf.disable_eager_execution()

import DCSCN
from helper import args, utilty as util

args.flags.DEFINE_boolean("save_results", True, "Save result, bicubic and loss images.")
args.flags.DEFINE_boolean("compute_bicubic", False, "Compute bicubic performance.")

FLAGS = args.get()
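
# Example invocation (illustrative only; pass the same model args you used at
# training time, e.g. --scale / --layers / --filters):
#   python evaluate.py --test_dataset=set5 --compute_bicubic=True --save_results=True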


def main(not_parsed_args):
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)
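
    # Two ways to restore the model: load a frozen inference graph, or build the
    # full graph and restore the trained variables in the evaluation loop below
    # (FLAGS.frozenInference / FLAGS.frozen_graph_path are assumed to be defined
    # in helper.args).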
    if FLAGS.frozenInference:
        model.load_graph(FLAGS.frozen_graph_path)
        model.build_summary_saver(with_saver=False)  # no need because we are not saving any variables
    else:
        model.build_graph()
        model.build_summary_saver()
    model.init_all_variables()

    if FLAGS.test_dataset == "all":
        test_list = ['set5', 'set14', 'bsd100']
    else:
        test_list = [FLAGS.test_dataset]

    for i in range(FLAGS.tests):
        if not FLAGS.frozenInference:
            model.load_model(FLAGS.load_model_name, trial=i, output_log=True if FLAGS.tests > 1 else False)

        if FLAGS.compute_bicubic:
            for test_data in test_list:
                print(test_data)
                evaluate_bicubic(model, test_data)

        for test_data in test_list:
            evaluate_model(model, test_data)
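

# Averages bicubic-upscaling PSNR/SSIM over every image in a test set;
# useful as a baseline for the model results below.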
def evaluate_bicubic(model, test_data):
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" + test_data)
    total_psnr = total_ssim = 0

    for filename in test_filenames:
        psnr, ssim = model.evaluate_bicubic(filename, print_console=False)
        total_psnr += psnr
        total_ssim += ssim

    logging.info("Bicubic Average [%s] PSNR:%f, SSIM:%f" % (
        test_data, total_psnr / len(test_filenames), total_ssim / len(test_filenames)))
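

# Runs the model on every image in a test set and logs average PSNR, SSIM and
# per-image inference time; when --save_results=True the generated HR and
# bicubic images are also written to FLAGS.output_dir.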
def evaluate_model(model, test_data):
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" + test_data)
    total_psnr = total_ssim = total_time = 0

    for filename in test_filenames:
        start = time.time()
        if FLAGS.save_results:
            psnr, ssim = model.do_for_evaluate_with_output(filename, output_directory=FLAGS.output_dir,
                                                           print_console=False)
        else:
            psnr, ssim = model.do_for_evaluate(filename, print_console=False)
        end = time.time()
        elapsed_time = end - start

        total_psnr += psnr
        total_ssim += ssim
        total_time += elapsed_time

    logging.info("Model Average [%s] PSNR:%f, SSIM:%f, Time (s): %f" % (
        test_data, total_psnr / len(test_filenames), total_ssim / len(test_filenames),
        total_time / len(test_filenames)))
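

# tf.app.run() parses the command-line flags and then calls main() with any
# arguments it did not consume.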
if __name__ == '__main__':
    tf.app.run()