test_Sony.py
# uniform content loss + adaptive threshold + per_class_input + recursive G
# improvement upon cqf37
from __future__ import division
import os, scipy.io
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import rawpy
import glob
from model import *
input_dir = './dataset/Sony/short/'
gt_dir = './dataset/Sony/long/'
checkpoint_dir = './checkpoint/Sony/'
result_dir = './result_Sony/'
# get test IDs
test_fns = glob.glob(gt_dir + '/1*.ARW')
test_ids = [int(os.path.basename(test_fn)[0:5]) for test_fn in test_fns]
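# e.g. a long-exposure file named '10003_00_10s.ARW' (hypothetical, following the SID
# naming scheme <id>_<sequence>_<exposure>s.ARW) yields the test ID 10003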
DEBUG = 0
if DEBUG == 1:
    save_freq = 2  # not referenced anywhere else in this test script
    test_ids = test_ids[0:5]
def pack_raw(raw):
    # pack Bayer image to 4 channels
    im = raw.raw_image_visible.astype(np.float32)
    im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract the black level

    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    out = np.concatenate((im[0:H:2, 0:W:2, :],
                          im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :],
                          im[1:H:2, 0:W:2, :]), axis=2)
    return out
def pack_raw_gt(raw):
    # pack Bayer image to 4 channels; the SID dataset is 14-bit, hence the 16383 white level
    im = raw.raw_image_visible.astype(np.float32)
    im = np.maximum(im, 0) / 16383  # normalize only; no black-level subtraction for the ground truth

    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    out = np.concatenate((im[0:H:2, 0:W:2, :],
                          im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :],
                          im[1:H:2, 0:W:2, :]), axis=2)
    return out
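# A minimal usage sketch of the two packing helpers above (the path below is hypothetical;
# the assumption is the SID Sony data: a 14-bit Bayer mosaic with black level 512).
# Each helper halves the spatial resolution and returns four colour planes:
#   raw = rawpy.imread('./dataset/Sony/short/10003_00_0.04s.ARW')
#   packed = pack_raw(raw)  # shape (H // 2, W // 2, 4), values in [0, 1]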
sess = tf.Session()
in_image = tf.placeholder(tf.float32, [None, None, None, 4])
gt_image = tf.placeholder(tf.float32, [None, None, None, 4])
out_image = denoise_net(in_image)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt:
    print('loaded ' + ckpt.model_checkpoint_path)
    saver.restore(sess, ckpt.model_checkpoint_path)
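# Assumption (not in the original script): without a restored checkpoint the network
# would run with randomly initialised weights, so warn rather than fail silently.
else:
    print('no checkpoint found in ' + checkpoint_dir)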
if not os.path.isdir(result_dir + 'final/'):
    os.makedirs(result_dir + 'final/')
for test_id in test_ids:
    # test the first image in each sequence
    in_files = glob.glob(input_dir + '%05d_00*.ARW' % test_id)
    for k in range(len(in_files)):
        in_path = in_files[k]
        in_fn = os.path.basename(in_path)
        print(in_fn)
        gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % test_id)
        gt_path = gt_files[0]
        gt_fn = os.path.basename(gt_path)
        in_exposure = float(in_fn[9:-5])
        gt_exposure = float(gt_fn[9:-5])
        ratio = min(gt_exposure / in_exposure, 300)
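        # e.g. a 0.04 s short exposure paired with a 10 s ground truth gives ratio = 250,
        # which is how much the packed input is amplified before entering the network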
        raw = rawpy.imread(in_path)
        input_full = np.expand_dims(pack_raw(raw), axis=0) * ratio
        # the original referenced an undefined `im` here; use the raw sensor data directly
        scale_full = np.expand_dims(np.float32(raw.raw_image_visible / 16383.0), axis=0) * ratio

        gt_raw = rawpy.imread(gt_path)
        # likewise undefined `im`; the otherwise unused pack_raw_gt applies the same 16383 normalization
        gt_full = np.expand_dims(pack_raw_gt(gt_raw), axis=0)

        input_full = np.minimum(input_full, 1.0)

        output = sess.run(out_image, feed_dict={in_image: input_full})
        output = np.minimum(np.maximum(output, 0), 1)

        output = output[0, :, :, :]
        gt_full = gt_full[0, :, :, :]
        scale_full = scale_full[0, :, :, :]
        # scale the low-light image to the same mean as the ground truth
        scale_full = scale_full * np.mean(gt_full) / np.mean(scale_full)
        # scipy.misc.toimage(output * 255, high=255, low=0, cmin=0, cmax=255).save(
        #     result_dir + 'final/%5d_00_%d_out.png' % (test_id, ratio))
        # scipy.misc.toimage(scale_full * 255, high=255, low=0, cmin=0, cmax=255).save(
        #     result_dir + 'final/%5d_00_%d_scale.png' % (test_id, ratio))
        # scipy.misc.toimage(gt_full * 255, high=255, low=0, cmin=0, cmax=255).save(
        #     result_dir + 'final/%5d_00_%d_gt.png' % (test_id, ratio))
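        # Note: scipy.misc.toimage was removed from SciPy (1.2+), so the saves above no
        # longer run on current installs. A minimal sketch of an alternative with imageio,
        # assuming the first three packed channels are an acceptable 8-bit visualisation
        # of these 4-channel arrays (uncomment, and `import imageio` at the top, to enable):
        # imageio.imwrite(result_dir + 'final/%5d_00_%d_out.png' % (test_id, ratio),
        #                 (output[:, :, :3] * 255).astype(np.uint8))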