import time

import tensorflow as tf
from tensorflow.keras import layers


# Hand-rolled binary cross-entropy. The discriminator below ends in a plain
# Dense(1) layer, so P arrives as raw logits: squash it through a sigmoid
# first, then average -[y*log(p) + (1-y)*log(1-p)] over the batch.
def cross_entropy(Y, P):
    P = tf.sigmoid(P)
    P = tf.clip_by_value(P, 1e-7, 1.0 - 1e-7)  # keep the logs finite
    return -tf.reduce_mean(Y * tf.math.log(P) + (1 - Y) * tf.math.log(1 - P))

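
# Quick cross-check (illustrative, not part of the original snippet): the
# hand-rolled loss should agree with Keras' built-in binary cross-entropy
# on logits for any single example.
_bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
_y, _logit = tf.constant([[1.0]]), tf.constant([[2.0]])
assert abs(float(cross_entropy(_y, _logit)) - float(_bce(_y, _logit))) < 1e-4
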

# Generator model: maps a 100-dim noise vector to a 28x28x1 image.
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Each stride-2 transposed convolution doubles the spatial size: 7 -> 14 -> 28.
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # tanh keeps pixel values in [-1, 1], so real images must be scaled to match.
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))

    return model

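
# Shape smoke test (illustrative sketch, not in the original): an untrained
# generator should already turn a batch of noise vectors into 28x28x1 images.
_demo_generator = make_generator_model()
_demo_image = _demo_generator(tf.random.normal([1, 100]), training=False)
assert _demo_image.shape == (1, 28, 28, 1)
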

# Discriminator model: maps a 28x28x1 image to a single real/fake logit.
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    # No activation here: the output is a raw logit, which is what the
    # sigmoid inside cross_entropy above expects.
    model.add(layers.Dense(1))

    return model

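
# Companion smoke test (illustrative sketch, not in the original): the
# discriminator emits one logit per image in the batch.
_demo_discriminator = make_discriminator_model()
_demo_logit = _demo_discriminator(tf.random.normal([1, 28, 28, 1]), training=False)
assert _demo_logit.shape == (1, 1)
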

# Discriminator loss: real images should be classified as 1, fakes as 0.
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss


# Generator loss: the generator succeeds when the discriminator scores its
# fakes as real (all-ones targets).
def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

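
# Numeric sanity check (illustrative, values are approximate): with logits
# of +/-3 the discriminator is confidently right on both counts, while the
# generator is failing to fool it.
_real_logits = tf.constant([[3.0]])
_fake_logits = tf.constant([[-3.0]])
print(float(discriminator_loss(_real_logits, _fake_logits)))  # ~0.10
print(float(generator_loss(_fake_logits)))                    # ~3.05
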

# Separate Adam optimizers for the generator and the discriminator, since
# the two models are updated independently
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)


# One training step: update both networks on a single batch of real images.
# @tf.function compiles the step into a graph; optional, but standard here.
@tf.function
def train_step(images):
    noise_dim = 100  # must match the generator's input_shape=(100,)
    batch_size = tf.shape(images)[0]
    noise = tf.random.normal([batch_size, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))


# Training loop
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for image_batch in dataset:
            train_step(image_batch)

        print('Epoch {}/{} finished in {:.2f} sec'.format(epoch + 1, epochs, time.time() - start))


generator = make_generator_model()
discriminator = make_discriminator_model()

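
# Driver sketch (assumptions not in the original snippet: MNIST digits,
# pixels rescaled to [-1, 1] to match the generator's tanh output, batch
# size 256, 50 epochs).
(train_images, _), _ = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(-1, 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # scale to [-1, 1]
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(60000).batch(256)

train(train_dataset, 50)
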

# Only the generator's parameters are saved: sampling new images after
# training needs the generator alone.
generator.save_weights("./saver/model")