Resolve issue 7 #10

Open
wants to merge 4 commits into master
8 changes: 8 additions & 0 deletions 猫狗识别/.idea/.gitignore

6 changes: 6 additions & 0 deletions 猫狗识别/.idea/inspectionProfiles/profiles_settings.xml

7 changes: 7 additions & 0 deletions 猫狗识别/.idea/misc.xml

8 changes: 8 additions & 0 deletions 猫狗识别/.idea/modules.xml

6 changes: 6 additions & 0 deletions 猫狗识别/.idea/vcs.xml

8 changes: 8 additions & 0 deletions 猫狗识别/.idea/猫狗识别.iml

161 changes: 58 additions & 103 deletions 猫狗识别/model.py
@@ -1,5 +1,5 @@
#coding=utf-8
import tensorflow as tf
from tensorflow.keras import layers, models
# Architecture
# conv1         convolution layer 1
# pooling1_lrn  pooling layer 1
@@ -8,104 +8,59 @@
# local3        fully connected layer 1
# local4        fully connected layer 2
# softmax       fully connected layer 3
def inference(images, batch_size, n_classes):

    with tf.variable_scope('conv1') as scope:
        # 3x3 convolution kernel, input depth 3, outputs 16 feature maps
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # pool2 and norm2
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')

    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # local4
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # softmax
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
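
For context, the functions on the removed side were driven through an explicit TF1 graph and session. A minimal sketch of that usage, assuming TensorFlow 1.x, a hypothetical 208x208 RGB input, and dummy data in place of the real cat/dog batches:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

images_ph = tf.placeholder(tf.float32, shape=[16, 208, 208, 3])
labels_ph = tf.placeholder(tf.int32, shape=[16])

logits = inference(images_ph, batch_size=16, n_classes=2)
loss = losses(logits, labels_ph)
train_op = trainning(loss, learning_rate=1e-4)
accuracy = evaluation(logits, labels_ph)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Dummy batch; real training would feed images and labels from the dataset
    batch_images = np.random.rand(16, 208, 208, 3).astype('float32')
    batch_labels = np.random.randint(0, 2, size=(16,))
    _, loss_val, acc_val = sess.run([train_op, loss, accuracy],
                                    feed_dict={images_ph: batch_images, labels_ph: batch_labels})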

def inference(input_shape, n_classes):
    model = models.Sequential()

    # Changed: use an Input layer to define the input shape
    model.add(layers.Input(shape=input_shape))

    # Conv1: first convolution layer, 3x3 kernels, 16 feature maps, ReLU activation
    model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same', name='conv1'))

    # Pooling1_lrn: 3x3 max pooling with 2x2 strides, followed by batch normalization
    model.add(layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='pooling1'))
    model.add(layers.BatchNormalization(name='norm1'))

    # Conv2: second convolution layer, 3x3 kernels, 16 feature maps, ReLU activation
    model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same', name='conv2'))

    # Pooling2_lrn: batch normalization, then 3x3 max pooling with 1x1 strides
    model.add(layers.BatchNormalization(name='norm2'))
    model.add(layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='pooling2'))

    # Flatten the multi-dimensional feature maps for the fully connected layers
    model.add(layers.Flatten())

    # Local3: first fully connected layer, 128 units, ReLU activation
    model.add(layers.Dense(128, activation='relu', name='local3'))

    # Local4: second fully connected layer, 128 units, ReLU activation
    model.add(layers.Dense(128, activation='relu', name='local4'))

    # Softmax: output layer with n_classes units and softmax activation
    model.add(layers.Dense(n_classes, activation='softmax', name='softmax_linear'))

    return model


# Compute the model loss. SparseCategoricalCrossentropy is a multi-class loss
# function for integer labels.
def losses(logits, labels):
    # from_logits=False because the final layer of inference() already applies softmax
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
    loss = loss_fn(labels, logits)
    return loss


# Define the model's training setup
def trainning(model, loss, learning_rate):
    # Use the Adam optimizer with the given learning rate
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    # compile: configure the model with the optimizer, loss function, and accuracy metric
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    return model


# Evaluate the model's performance
def evaluation(model, images, labels):
    loss, accuracy = model.evaluate(images, labels)
    return accuracy
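
For reference, a minimal sketch of how the new Keras-based functions fit together. The model.py import path, the 208x208 input size, the two-class setup, and the dummy data are assumptions for illustration, not part of this PR:

import numpy as np
import tensorflow as tf
from model import inference, losses, trainning, evaluation  # hypothetical import path

# Hypothetical input: 208x208 RGB images, two classes (cat / dog)
model = inference((208, 208, 3), n_classes=2)

# trainning() compiles the model; the loss matches the one used in losses()
model = trainning(model, tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  learning_rate=1e-4)

# Dummy data stands in for the real cat/dog dataset
images = np.random.rand(8, 208, 208, 3).astype('float32')
labels = np.random.randint(0, 2, size=(8,))

model.fit(images, labels, epochs=1, batch_size=4)
print('accuracy:', evaluation(model, images, labels))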