import tensorflow as tf
import numpy as np

# Load the MNIST dataset
data = tf.keras.datasets.mnist
(train_x, train_y), (test_x, test_y) = data.load_data()
print(train_x.shape, test_x.shape)

# Preprocess the data: scale pixel values from [0, 255] to [0, 1]
train_x, test_x = train_x / 255.0, test_x / 255.0
print(train_x.shape, test_x.shape)
train_x = tf.expand_dims(train_x, -1)
test_x = tf.expand_dims(test_x, -1)

# See what the expand_dims calls did:
print(train_x.shape, test_x.shape)
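# With the standard MNIST split this should print (60000, 28, 28, 1) (10000, 28, 28, 1):
# expand_dims appended a single channel axis to every image.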
train_y = np.float32(tf.keras.utils.to_categorical(train_y, num_classes=10))
test_y = np.float32(tf.keras.utils.to_categorical(test_y, num_classes=10))
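# The labels are now one-hot float32 vectors of length 10, which matches the
# categorical_crossentropy loss used when the model is compiled below.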

# Batch size for the tf.data input pipelines
batch_size = 512
# Shuffle individual examples before batching; the test set does not need shuffling.
train_data = tf.data.Dataset.from_tensor_slices((train_x, train_y)).shuffle(batch_size * 10).batch(batch_size)
test_data = tf.data.Dataset.from_tensor_slices((test_x, test_y)).batch(batch_size)


# Use a custom layer to implement a residual-style block
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, kernel_size, filters):
        super(MyLayer, self).__init__()
        self.kernel_size = kernel_size
        self.filters = filters

    def build(self, input_shape):
        # Create the kernel and bias once the number of input channels is known.
        self.weight = tf.Variable(
            tf.random.normal([self.kernel_size, self.kernel_size, input_shape[-1], self.filters]))
        self.bias = tf.Variable(tf.random.normal([self.filters]))
        super(MyLayer, self).build(input_shape)

    def call(self, input_tensor):
        # Stride-2 convolution halves the spatial resolution.
        conv = tf.nn.conv2d(input_tensor, self.weight, strides=[1, 2, 2, 1], padding='SAME')
        conv = tf.nn.bias_add(conv, self.bias)
        # Shortcut connection: add the pre-activation convolution to its ReLU output
        # (the raw input cannot be added directly because the stride-2 convolution changes its shape).
        out = tf.nn.relu(conv) + conv
        return out
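
# Optional sanity check (a minimal sketch): a batch of 28x28x1 images run through
# MyLayer(3, 32) should come out with shape (batch, 14, 14, 32), since the stride-2
# 'SAME' convolution halves 28 to 14. Uncomment to verify:
# print(MyLayer(3, 32)(tf.zeros([4, 28, 28, 1])).shape)  # expected: (4, 14, 14, 32)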


# Build the model
input_xs = tf.keras.Input(shape=(28, 28, 1))
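# Input() above defines a symbolic 28x28x1 image tensor for the functional API;
# the batch dimension is left unspecified.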
# 32 convolution kernels of size [3, 3]
# conv = tf.keras.layers.Conv2D(32, 3, padding="SAME", activation=tf.nn.relu)(input_xs)
conv = MyLayer(3, 32)(input_xs)  # kernel_size=3, filters=32
# Batch normalization normalizes the previous layer's activations, which also helps reduce overfitting
conv = tf.keras.layers.BatchNormalization()(conv)
conv = tf.keras.layers.Conv2D(64, 3, padding="SAME", activation=tf.nn.relu)(conv)
conv = tf.keras.layers.MaxPool2D(strides=[1, 1])(conv)
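# Note: with the Keras defaults (pool_size=2, padding='valid'), strides=[1, 1] means the
# pooling layer above only trims the 14x14 feature maps to 13x13 rather than halving them.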
conv = tf.keras.layers.Conv2D(128, 3, padding="SAME", activation=tf.nn.relu)(conv)
# Flatten the feature maps into a 1-D vector and connect to the fully connected layers
flatten = tf.keras.layers.Flatten()(conv)
dense = tf.keras.layers.Dense(512, activation='relu')(flatten)
# Classify into ten classes
logits = tf.keras.layers.Dense(10, activation='softmax')(dense)

model = tf.keras.Model(inputs=input_xs, outputs=logits)

model.compile(optimizer=tf.optimizers.Adam(1e-3), loss=tf.losses.categorical_crossentropy, metrics=['accuracy'])
model.summary()  # prints each layer's output shape and parameter count
model.fit(train_data, epochs=10)
#model.save('../saver/model.h5')
score = model.evaluate(test_data)
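# With metrics=['accuracy'] in compile(), evaluate() returns [test loss, test accuracy].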
print("last score:", score)