Commit aa50f41

Author: Matheus Della Croce Oliveira
Committed: Mar 3, 2017
compatible with tensorflow 1.0
1 parent 757b3a7 commit aa50f41


1 file changed: +4 −4 lines changed


text_cnn.py (+4 −4)
@@ -21,10 +21,10 @@ def __init__(
 
         # Embedding layer
         with tf.device('/cpu:0'), tf.name_scope("embedding"):
-            W = tf.Variable(
+            self.W = tf.Variable(
                 tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                 name="W")
-            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
+            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
             self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
 
         # Create a convolution + maxpool layer for each filter size
@@ -54,7 +54,7 @@ def __init__(
 
         # Combine all the pooled features
         num_filters_total = num_filters * len(filter_sizes)
-        self.h_pool = tf.concat(3, pooled_outputs)
+        self.h_pool = tf.concat(pooled_outputs, 3)
         self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
 
         # Add dropout
@@ -75,7 +75,7 @@ def __init__(
 
         # CalculateMean cross-entropy loss
         with tf.name_scope("loss"):
-            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
+            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
             self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
 
         # Accuracy
