Skip to content
This repository has been archived by the owner on Mar 2, 2022. It is now read-only.

Consideration of Tensorflow layer replacement

PINTO edited this page Dec 16, 2018 · 8 revisions

tf.exp (Not supported by OpenVINO)

Computes the exponential of x element-wise.
y = e ^ x
where e ≈ 2.71828 (Euler's number)

tf.exp(
    x,
    name=None
)

tf.fill (Not supported by OpenVINO)

Output tensor has shape [2, 3]

tf.fill([2, 3], 2.7) ==> [[2.7, 2.7, 2.7],
                          [2.7, 2.7, 2.7]]

tf.tile

import tensorflow as tf

x = tf.constant([[1,0],[0,1]])
with tf.Session() as sess:   
    print(x.eval())

[[1 0]
 [0 1]]

y = tf.tile(x, [2,1])
with tf.Session() as sess:   
    print(y.eval())

[[1 0]
 [0 1]
 [1 0]
 [0 1]]

y = tf.tile(x, [1,2])
with tf.Session() as sess:   
    print(y.eval())

[[1 0 1 0]
 [0 1 0 1]]

y = tf.tile(x, [2,2])
with tf.Session() as sess:   
    print(y.eval())

[[1 0 1 0]
 [0 1 0 1]
 [1 0 1 0]
 [0 1 0 1]]

# 't' is a tensor of [a b c d]
tf.tile(t, [2])
# [a b c d a b c d]

# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.tile(t2, [3, 1, 1]))  
# [6, 3, 5]

tf.pow

Computes the power of one value to another.
Given a tensor x and a tensor y, this operation computes x ^ y for corresponding elements in x and y.

x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y)  # [[256, 65536], [9, 27]]

tf.reshape

tf.reshape(
    tensor: Tensor,
    shape: Tensor[int32 or int64],
    name: str=None
)

# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor 't' is [[[1, 1], [2, 2]],
#                [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor 't' is [[[1, 1, 1],
#                 [2, 2, 2]],
#                [[3, 3, 3],
#                 [4, 4, 4]],
#                [[5, 5, 5],
#                 [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                              [2, 2, 2],
                              [3, 3, 3]],
                             [[4, 4, 4],
                              [5, 5, 5],
                              [6, 6, 6]]]

# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7

tf.concat

tf.concat(
    values: Tensor,
    axis: Tensor[int],
    name='concat'
)

t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0)  # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1)  # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]

# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0))  # [4, 3]
tf.shape(tf.concat([t3, t4], 1))  # [2, 6]

t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
tf.concat([t1, t2], -1)
[[[ 1,  2,  7,  4],
  [ 2,  3,  8,  4]],

 [[ 4,  4,  2, 10],
  [ 5,  3, 15, 11]]]

tf.squeeze

tf.squeeze(
    input: Tensor,
    axis: Optional[List[int]]=None,
    name=None,
    squeeze_dims=None
)

# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t))  # [2, 3]

# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4]))  # [1, 2, 3, 1]

tf.split

tf.split(
    value: Tensor,
    num_or_size_splits: Union[Tensor[int], Tensor[List[int]]],
    axis=0,
    num=None,
    name='split'
)

# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0)  # [5, 4]
tf.shape(split1)  # [5, 15]
tf.shape(split2)  # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0)  # [5, 10]

Get size of shape of Tensor

# A is a tensor with shape [5, 2, 3]
shape_a = tf.shape(A)  # [5, 2, 3]
dim1, dim2, dim3 = tf.unstack(shape_a)
# dim1, dim2, dim3 receive Tensor(5), Tensor(2), Tensor(3) respectively
# Because shape_a is a one-dimensional Tensor, its elements can be extracted individually by unstacking

Create a larger Tensor by replicating an existing Tensor N times along a new leading dimension

encoder_state = ...  # [batch_size, max_length, hidden_unit_num]
new_state = ...  # [1, hidden_unit_num]
new_state = tf.expand_dims(new_state, 0)  # [1, 1, hidden_unit_num]
new_state = tf.tile(new_state, [encoder_state.shape[0], 1, 1])  # [batch_size, 1, hidden_unit_num]