import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.utils import to_categorical
def train_step(images_one_batch, one_batch_labels):
    """Run one GAN training step on a single batch of labeled images.

    Samples fresh noise, generates fake images, scores both real and fake
    batches with the discriminator, then updates each network from its own
    gradient tape.

    Relies on module-level globals defined elsewhere in the file:
    ``generator``, ``discriminator``, ``generator_opt``, ``discriminator_opt``,
    ``generator_loss``, ``discriminator_loss`` and ``noise_dim``.

    Parameters:
        images_one_batch: batch of real images (first axis is batch size).
        one_batch_labels: integer class labels (0-9) for the real images.
    """
    # One noise vector per image in the batch; the noise acts as the seed.
    noise = tf.random.normal([images_one_batch.shape[0], noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Real images -> (real/fake score, predicted class) from the discriminator.
        real_out, real_pred_labels = discriminator(images_one_batch, training=True)
        gen_image = generator(noise, training=True)
        # Generated (fake) images -> (real/fake score, predicted class).
        fake_out, fake_pred_labels = discriminator(gen_image, training=True)
        # Classes 0-9 are the real digit labels; class 10 marks "fake".
        to_categorical_real_labels = to_categorical(one_batch_labels,
                                                    num_classes=10 + 1)
        to_categorical_fake_labels = to_categorical(
            np.full((images_one_batch.shape[0], 1), 10), num_classes=10 + 1)
        # Compute each network's loss.
        gen_loss = generator_loss(fake_out)
        disc_loss = discriminator_loss(real_out, fake_out,
                                       real_pred_labels, fake_pred_labels,
                                       to_categorical_real_labels,
                                       to_categorical_fake_labels)
    # Gradients of each loss w.r.t. its own network's trainable weights.
    # NOTE(review): the "Gradients do not exist for variables
    # ['discriminator_model/conv2d/...']" warning reported later in this file
    # means disc_loss does not depend on those variables — verify that
    # discriminator_loss really uses every discriminator output it is given.
    gradient_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradient_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    # Apply the updates with each network's optimizer.
    generator_opt.apply_gradients(zip(gradient_gen, generator.trainable_variables))
    discriminator_opt.apply_gradients(zip(gradient_disc,
                                          discriminator.trainable_variables))
定义生成图片的展示函数
# Feed test_noise through gen_model to produce and display fake images.
def generate_plot_image(gen_model, test_noise):
    """Generate images from ``test_noise`` and display them in a 4x4 grid.

    Parameters:
        gen_model: the generator network.
        test_noise: batch of fixed noise vectors — assumed to hold 16 vectors
            so they fill the 4x4 grid; TODO confirm against the seed shape.
    """
    # Inference only: no need to train the generator here.
    pre_images = gen_model(test_noise, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(pre_images.shape[0]):
        plt.subplot(4, 4, i + 1)
        # Map pixel values from [-1, 1] back to [0, 1] for display
        # (presumably the generator's output activation is tanh — verify).
        plt.imshow((pre_images[i, :, :, 0] + 1) / 2, cmap='gray')
        plt.axis('off')
    plt.show()
定义训练函数
def train(dataset, epochs):
    """Train the GAN for ``epochs`` passes over ``dataset``.

    Parameters:
        dataset: iterable yielding ``(image_batch, label_batch)`` pairs.
        epochs: number of full passes over the dataset.

    Uses the module-level ``generator`` and fixed ``seed`` noise so progress
    can be inspected visually after every epoch.
    """
    for epoch in range(epochs):
        for image_batch, one_batch_labels in dataset:
            train_step(image_batch, one_batch_labels)
        # Visualize generator progress at the end of each epoch.
        generate_plot_image(generator, seed)
开始训练
# Kick off training over the full dataset for EPOCHS epochs.
# (`datasets` and `EPOCHS` are assumed to be defined earlier in the file —
# they are not visible in this chunk.)
train(datasets,EPOCHS)
训练结果

训练过程中一直报出如下 warning:
WARNING:tensorflow:Gradients do not exist for variables ['discriminator_model/conv2d/kernel:0', 'discriminator_model/conv2d/bias:0'] when minimizing the loss.