forked from YuhuiMa/DFN-tensorflow
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcomponents.py
More file actions
82 lines (56 loc) · 5.83 KB
/
components.py
File metadata and controls
82 lines (56 loc) · 5.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
# -*- coding: utf-8 -*-
import tensorflow as tf
def batchnorm(inputs):
    """Normalize *inputs* over the feature dimensions.

    NOTE: despite the name, this applies layer normalization
    (``tf.contrib.layers.layer_norm``); the original per-batch
    normalization call was disabled upstream and removed here.
    """
    return tf.contrib.layers.layer_norm(inputs)
def lrelu(x, a):
    """Leaky ReLU activation: ``max(x, a * x)`` with negative slope *a*."""
    activated = tf.nn.leaky_relu(x, a)
    return activated
def side_branch(x, nc, factor, initializer=tf.random_normal_initializer(0, 0.02), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), bias_regularizer=tf.contrib.layers.l2_regularizer(0.0001)):
    """Side-output branch: 1x1 conv to *nc* channels, then upsample by *factor*.

    The upsampling uses a transposed convolution with kernel size
    ``2 * factor`` and stride *factor* (HED-style side output).
    """
    common = dict(kernel_initializer=initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer)
    projected = tf.layers.conv2d(x, nc, kernel_size=1, strides=(1, 1),
                                 padding="same", **common)
    upsampled = tf.layers.conv2d_transpose(projected, nc,
                                           kernel_size=2 * factor,
                                           strides=(factor, factor),
                                           padding="same", **common)
    return upsampled
def identity_block(batch_input, filters, kernel_size=3, k=0, initializer=tf.random_normal_initializer(0, 0.02), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), bias_regularizer=tf.contrib.layers.l2_regularizer(0.0001)):
    """ResNet-style identity block (no spatial downsampling).

    Main path: 1x1 conv -> norm -> leaky ReLU -> kxk conv -> norm ->
    leaky ReLU -> 1x1 conv -> norm.  The input is added back via a skip
    connection before a final leaky ReLU (negative slope *k*).
    ``filters`` is a 3-tuple of channel counts for the three convs.
    """
    f1, f2, f3 = filters
    common = dict(kernel_initializer=initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer)
    # Bottleneck: reduce channels with a 1x1 conv.
    out = tf.layers.conv2d(batch_input, f1, kernel_size=1, strides=(1, 1),
                           padding="valid", **common)
    out = lrelu(batchnorm(out), k)
    # Spatial kxk conv at reduced width.
    out = tf.layers.conv2d(out, f2, kernel_size=kernel_size, strides=(1, 1),
                           padding="same", **common)
    out = lrelu(batchnorm(out), k)
    # Restore channels with a 1x1 conv; activation comes after the sum.
    out = tf.layers.conv2d(out, f3, kernel_size=1, strides=(1, 1),
                           padding="valid", **common)
    out = batchnorm(out)
    return lrelu(batch_input + out, k)
def conv_block(batch_input, filters, kernel_size=3, strides=(2, 2), k=0, initializer=tf.random_normal_initializer(0, 0.02), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), bias_regularizer=tf.contrib.layers.l2_regularizer(0.0001)):
    """ResNet-style convolutional block (downsamples by *strides*).

    Shortcut path: a strided 1x1 conv projects the input to the final
    channel count.  Main path: strided 1x1 conv -> norm -> leaky ReLU ->
    kxk conv -> norm -> leaky ReLU -> 1x1 conv -> norm.  The two paths
    are summed and passed through a final leaky ReLU (slope *k*).
    ``filters`` is a 3-tuple of channel counts for the main-path convs.
    """
    f1, f2, f3 = filters
    common = dict(kernel_initializer=initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer)
    # Shortcut branch: match shape (channels and stride) of the main path.
    shortcut = tf.layers.conv2d(batch_input, f3, kernel_size=1,
                                strides=strides, padding="valid", **common)
    shortcut = batchnorm(shortcut)
    # Main branch: strided bottleneck.
    main = tf.layers.conv2d(batch_input, f1, kernel_size=1, strides=strides,
                            padding="valid", **common)
    main = lrelu(batchnorm(main), k)
    main = tf.layers.conv2d(main, f2, kernel_size=kernel_size, strides=(1, 1),
                            padding="same", **common)
    main = lrelu(batchnorm(main), k)
    main = tf.layers.conv2d(main, f3, kernel_size=1, strides=(1, 1),
                            padding="valid", **common)
    main = batchnorm(main)
    return lrelu(shortcut + main, k)
######### -*- Refinement Residual Block -*- #########
def rrb(batch_input, filters, kernel_size=3, k=0, initializer=tf.random_normal_initializer(0, 0.02), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), bias_regularizer=tf.contrib.layers.l2_regularizer(0.0001)):
    """Refinement Residual Block (DFN).

    A 1x1 conv first maps the input to ``filters[1]`` channels; a small
    residual path (kxk conv -> norm -> leaky ReLU -> kxk conv) is then
    added back onto that projection, followed by a final leaky ReLU
    (negative slope *k*).
    """
    narrow, wide = filters
    common = dict(kernel_initializer=initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer)
    # Channel projection; also serves as the identity path of the residual.
    projected = tf.layers.conv2d(batch_input, wide, kernel_size=1,
                                 strides=(1, 1), padding="valid", **common)
    residual = tf.layers.conv2d(projected, narrow, kernel_size=kernel_size,
                                strides=(1, 1), padding="same", **common)
    residual = lrelu(batchnorm(residual), k)
    residual = tf.layers.conv2d(residual, wide, kernel_size=kernel_size,
                                strides=(1, 1), padding="same", **common)
    return lrelu(projected + residual, k)
######### -*- Channel Attention Block -*- #########
def cab(batch_input1, batch_input2, fn, k=0, initializer=tf.random_normal_initializer(0, 0.02), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), bias_regularizer=tf.contrib.layers.l2_regularizer(0.0001)):
    """Channel Attention Block (DFN).

    Concatenates the two equal-shaped inputs along channels, global-average
    pools to 1x1, and runs a two-layer 1x1-conv bottleneck (width *fn*,
    leaky ReLU slope *k*, then sigmoid) to produce per-channel attention
    weights.  Returns ``batch_input2 + weights * batch_input1``.
    """
    # NOTE(review): `assert` is stripped under `python -O`; kept as-is to
    # preserve the caller-visible exception type (AssertionError).
    shape1 = batch_input1.get_shape().as_list()
    shape2 = batch_input2.get_shape().as_list()
    assert shape1 == shape2
    fused = tf.concat([batch_input1, batch_input2], axis=3)
    # Hoist the static spatial dims once; pooling over the full HxW extent
    # with matching stride yields a 1x1 global average.
    height, width = fused.get_shape().as_list()[1], fused.get_shape().as_list()[2]
    window = [1, height, width, 1]
    pooled = tf.nn.avg_pool(fused, ksize=window, strides=window, padding='SAME')
    common = dict(kernel_initializer=initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer)
    squeezed = tf.layers.conv2d(pooled, fn, kernel_size=1, strides=(1, 1),
                                padding="valid", **common)
    squeezed = lrelu(squeezed, k)
    restored = tf.layers.conv2d(squeezed, shape1[-1], kernel_size=1,
                                strides=(1, 1), padding="valid", **common)
    attention = tf.sigmoid(restored)
    weighted = tf.multiply(batch_input1, attention)
    return batch_input2 + weighted