# ResNet-style convolutional network trained on CIFAR-100 (TensorFlow / Keras).
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np

# CIFAR-100: 50k training / 10k test RGB images of shape (32, 32, 3),
# with integer class labels in [0, 99]. load_data() already returns
# numpy arrays, so no further conversion is needed.
CIFAR_100_data = tf.keras.datasets.cifar100
(train_data, train_label), (test_data, test_label) = CIFAR_100_data.load_data()
def identity_block(input_tensor, out_dim):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions plus a skip.

    The two inner convolutions use out_dim // 4 filters; the final 1x1 conv
    restores out_dim channels so the skip connection can be added
    element-wise. Input and output tensors share the same shape.

    NOTE(review): ReLU is applied inside each Conv2D *before* the following
    BatchNormalization, and the last conv has no BN at all — unusual for a
    ResNet, but kept as-is to preserve the original behavior.
    """
    bottleneck = out_dim // 4

    x = tf.keras.layers.Conv2D(filters=bottleneck, kernel_size=1,
                               padding='SAME', activation=tf.nn.relu)(input_tensor)
    x = tf.keras.layers.BatchNormalization()(x)

    x = tf.keras.layers.Conv2D(filters=bottleneck, kernel_size=3,
                               padding='SAME', activation=tf.nn.relu)(x)
    x = tf.keras.layers.BatchNormalization()(x)

    x = tf.keras.layers.Conv2D(filters=out_dim, kernel_size=1,
                               padding='SAME', activation=tf.nn.relu)(x)

    # Residual connection followed by a final ReLU.
    merged = tf.keras.layers.Add()([input_tensor, x])
    return tf.nn.relu(merged)
def _conv_bn_stage(x, filters, n_blocks):
    """One stage: 3x3 conv to `filters` channels, BN, then n_blocks identity blocks."""
    x = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
                               padding='SAME', activation=tf.nn.relu)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    for _ in range(n_blocks):
        x = identity_block(x, filters)
    return x


def resnet_Model():
    """Build a ResNet-style classifier for CIFAR-100.

    Returns:
        A tf.keras.Model mapping (32, 32, 3) images to 100 softmax
        class probabilities.
    """
    inputs = tf.keras.Input(shape=[32, 32, 3])
    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3,
                               padding='SAME', activation=tf.nn.relu)(inputs)

    # Four stages with (channel width, identity-block count) per stage.
    # Note the spatial resolution is never reduced (no stride/pooling),
    # so Flatten below sees the full 32x32 feature map.
    for filters, n_blocks in ((64, 3), (128, 2), (256, 3), (512, 3)):
        x = _conv_bn_stage(x, filters, n_blocks)

    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dropout(0.217)(x)
    x = tf.keras.layers.Dense(1024, activation=tf.nn.relu)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    outputs = tf.keras.layers.Dense(100, activation=tf.nn.softmax)(x)

    return tf.keras.Model(inputs=inputs, outputs=outputs)
if __name__ == '__main__':
    # The dense-layer width and the per-stage block counts were reduced
    # relative to a full ResNet; a larger configuration ran out of GPU
    # memory (tensorflow ResourceExhaustedError) during training.
    model = resnet_Model()
    model.compile(optimizer=tf.optimizers.Adam(1e-2),
                  loss=tf.losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])
    model.fit(train_data, train_label, epochs=10, batch_size=100)
    score = model.evaluate(test_data, test_label)
    print('score:', score)