tf_ops.py
import tensorflow as tf
from tensorflow.python.framework import ops

# Adapted from https://github.com/hardmaru/diff-vae-tensorflow/blob/master/ops.py

class batch_norm(object):
    """Code modification of http://stackoverflow.com/a/33950177"""

    def __init__(self, batch_size, epsilon=1e-5, momentum=0.1, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.batch_size = batch_size  # stored but not used below
            # Note: this moving average is created but never applied, so
            # normalization always uses per-batch statistics (see __call__).
            self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)
            self.name = name

    def __call__(self, x, train=True):
        # `train` is accepted for API compatibility but currently ignored.
        shape = x.get_shape().as_list()
        with tf.variable_scope(self.name):
            self.gamma = tf.get_variable("gamma", [shape[-1]],
                                         initializer=tf.random_normal_initializer(1., 0.02))
            self.beta = tf.get_variable("beta", [shape[-1]],
                                        initializer=tf.constant_initializer(0.))
            # Moments over batch, height, and width: assumes a 4-D NHWC input.
            self.mean, self.variance = tf.nn.moments(x, [0, 1, 2])
            return tf.nn.batch_norm_with_global_normalization(
                x, self.mean, self.variance, self.beta, self.gamma, self.epsilon,
                scale_after_normalization=True)
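
# Usage sketch (added for illustration; not in the upstream file). Assumes
# TF1.x graph mode and a 4-D NHWC activation tensor:
#
#   bn = batch_norm(batch_size=64, name="d_bn1")
#   h = tf.placeholder(tf.float32, [64, 16, 16, 32])
#   h_normed = bn(h, train=True)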

def binary_cross_entropy_with_logits(logits, targets, name=None):
    """Computes binary cross entropy, averaged over all elements.

    For brevity, let `x = logits`, `z = targets`. The loss is

        loss(x, z) = - mean_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))

    Note that, despite the parameter names, `x` plays the role of the binary
    label and `z` the role of the predicted probability; both are expected to
    lie in [0, 1], and neither is a raw (pre-sigmoid) logit.

    Args:
        logits: A `Tensor` of type `float32` or `float64` (the labels).
        targets: A `Tensor` of the same type and shape (the probabilities).
    """
    eps = 1e-12  # keeps the logs away from log(0)
    with ops.op_scope([logits, targets], name, "bce_loss"):
        logits = ops.convert_to_tensor(logits, name="logits")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(logits * tf.log(targets + eps) +
                                (1. - logits) * tf.log(1. - targets + eps)))
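
# Usage sketch (added for illustration; not in the upstream file): score
# sigmoid outputs against binary labels, following the argument roles noted
# in the docstring above. `d_probs` is a hypothetical sigmoid output:
#
#   d_probs = tf.nn.sigmoid(d_logits)  # `d_logits` is hypothetical too
#   loss = binary_cross_entropy_with_logits(tf.ones_like(d_probs), d_probs)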

def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU, implemented in a branch-free form."""
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        return f1 * x + f2 * abs(x)
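
# Note (added for clarity): with f1 = (1 + leak) / 2 and f2 = (1 - leak) / 2,
# f1 * x + f2 * abs(x) equals x for x >= 0 and leak * x for x < 0, so it is
# an element-wise equivalent of tf.maximum(x, leak * x) without a comparison.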

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    """Fully connected layer: input_ * Matrix + bias."""
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            # Also hand the weight and bias variables back to the caller.
            return tf.nn.xw_plus_b(input_, matrix, bias), matrix, bias
        else:
            return tf.nn.xw_plus_b(input_, matrix, bias)
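
# Usage sketch (added for illustration; not in the upstream file): a small
# two-layer MLP head built from linear() and lrelu(). Shapes and scope names
# are arbitrary.
#
#   x = tf.placeholder(tf.float32, [None, 784])
#   h1 = lrelu(linear(x, 256, scope="fc1"))
#   y_logits = linear(h1, 10, scope="fc2")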