@@ -21,10 +21,10 @@ def __init__(
 
         # Embedding layer
         with tf.device('/cpu:0'), tf.name_scope("embedding"):
-            W = tf.Variable(
+            self.W = tf.Variable(
                 tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                 name="W")
-            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
+            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
             self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
 
         # Create a convolution + maxpool layer for each filter size
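
Promoting the local W to self.W exposes the embedding matrix as an attribute of the model object, so it can be inspected or overwritten after graph construction. A minimal sketch of one such use, assuming a model instance cnn, an open session sess, and dimensions vocab_size and embedding_size (all of these names are illustrative, not part of the diff):

    import numpy as np
    import tensorflow as tf

    sess.run(tf.global_variables_initializer())
    # Illustrative: overwrite the random initialization with pretrained
    # vectors, which only works because W is now reachable as cnn.W.
    pretrained = np.random.uniform(-1.0, 1.0,
                                   (vocab_size, embedding_size)).astype(np.float32)
    sess.run(cnn.W.assign(pretrained))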
@@ -54,7 +54,7 @@ def __init__(
 
         # Combine all the pooled features
         num_filters_total = num_filters * len(filter_sizes)
-        self.h_pool = tf.concat(3, pooled_outputs)
+        self.h_pool = tf.concat(pooled_outputs, 3)
         self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
 
         # Add dropout
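
The tf.concat fix tracks an API change in TensorFlow 1.0: the pre-1.0 signature was tf.concat(concat_dim, values), whereas 1.0 and later expect tf.concat(values, axis). A minimal standalone check of the new argument order, assuming TensorFlow 1.x:

    import tensorflow as tf

    a = tf.constant([[1.0, 2.0]])
    b = tf.constant([[3.0, 4.0]])
    c = tf.concat([a, b], 0)  # values first, then axis; result shape (2, 2)

    with tf.Session() as sess:
        print(sess.run(c))  # [[1. 2.] [3. 4.]]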
@@ -75,7 +75,7 @@ def __init__(
 
         # Calculate mean cross-entropy loss
         with tf.name_scope("loss"):
-            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
+            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
             self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
 
         # Accuracy
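
The loss fix reflects the same 1.0 migration: tf.nn.softmax_cross_entropy_with_logits now requires logits and labels to be passed as keyword arguments, which guards against silently swapping the two tensors. A minimal standalone example of the keyword form, assuming TensorFlow 1.x and illustrative tensors:

    import tensorflow as tf

    scores = tf.constant([[2.0, 0.5], [0.1, 1.5]])  # unnormalized class scores
    labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # one-hot targets
    losses = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=labels)
    loss = tf.reduce_mean(losses)

    with tf.Session() as sess:
        print(sess.run(loss))  # mean cross-entropy over the batch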