#教计算机学画卡通人物#生成式对抗神经网络GAN原理、Tensorflow搭建网络生成卡通人脸 | 您所在的位置:网站首页 › 搭建卡通 › #教计算机学画卡通人物#生成式对抗神经网络GAN原理、Tensorflow搭建网络生成卡通人脸 |
生成式对抗神经网络GAN原理、Tensorflow搭建网络生成卡通人脸
# DCGAN for cartoon-face generation (TensorFlow 1.x graph-mode API).
#
# Architecture (per the DCGAN guidelines):
#   D: 96x96x3 image -> 4 strided 5x5 convs (leaky ReLU, BN on hidden
#      layers) -> 6x6 VALID conv -> single raw logit.
#   G: 128-d uniform noise -> dense to 6x6x512 -> 4 fractionally-strided
#      5x5 deconvs (ReLU + BN on hidden layers) -> tanh image in [-1, 1].
# Losses are sigmoid cross-entropy on D's logits; both nets are trained
# with Adam(lr=2e-4, beta1=0.5).
import tensorflow as tf
import PIL.Image as pimg  # kept from the original; not used below
import matplotlib.pyplot as plt
import numpy as np
from Samping import MyDataset  # project-local data loader (module name sic)


class D_net:
    """Discriminator: maps a 96x96x3 image to one real/fake logit."""

    def __init__(self):
        # 5x5 strided conv filters; spatial size halves each layer:
        # 96 -> 48 -> 24 -> 12 -> 6, then a 6x6 VALID conv -> 1x1.
        self.w1 = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 64], stddev=0.02, dtype=tf.float32))
        self.b1 = tf.Variable(tf.zeros(shape=[64], dtype=tf.float32))
        self.w2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 64, 128], stddev=0.02, dtype=tf.float32))
        self.b2 = tf.Variable(tf.zeros(shape=[128], dtype=tf.float32))
        self.w3 = tf.Variable(tf.truncated_normal(shape=[5, 5, 128, 256], stddev=0.02, dtype=tf.float32))
        self.b3 = tf.Variable(tf.zeros(shape=[256], dtype=tf.float32))
        self.w4 = tf.Variable(tf.truncated_normal(shape=[5, 5, 256, 512], stddev=0.02, dtype=tf.float32))
        self.b4 = tf.Variable(tf.zeros(shape=[512], dtype=tf.float32))
        self.w5 = tf.Variable(tf.truncated_normal(shape=[6, 6, 512, 1], stddev=0.02, dtype=tf.float32))
        self.b5 = tf.Variable(tf.zeros(shape=[1], dtype=tf.float32))

    def forward(self, x):
        """Return shape [batch, 1] raw logits (NOT probabilities)."""
        x = tf.reshape(x, [-1, 96, 96, 3])
        # First layer: no batch-norm (standard DCGAN practice).
        y1 = tf.nn.leaky_relu(
            tf.nn.conv2d(x, self.w1, [1, 2, 2, 1], padding="SAME") + self.b1)   # 48x48x64
        y2 = tf.nn.leaky_relu(tf.layers.batch_normalization(
            tf.nn.conv2d(y1, self.w2, [1, 2, 2, 1], padding="SAME") + self.b2))  # 24x24x128
        y3 = tf.nn.leaky_relu(tf.layers.batch_normalization(
            tf.nn.conv2d(y2, self.w3, [1, 2, 2, 1], padding="SAME") + self.b3))  # 12x12x256
        y4 = tf.nn.leaky_relu(tf.layers.batch_normalization(
            tf.nn.conv2d(y3, self.w4, [1, 2, 2, 1], padding="SAME") + self.b4))  # 6x6x512
        # BUG FIX: the final layer must stay un-activated and un-normalized —
        # the losses below feed this value to sigmoid_cross_entropy_with_logits,
        # which expects a raw logit (the original applied BN + leaky_relu here).
        y5 = tf.nn.conv2d(y4, self.w5, [1, 1, 1, 1], padding="VALID") + self.b5  # 1x1x1 logit
        return tf.reshape(y5, [-1, 1])

    def params(self):
        """All trainable variables of the discriminator (for var_list)."""
        return [self.w1, self.b1, self.w2, self.b2, self.w3, self.b3,
                self.w4, self.b4, self.w5, self.b5]


class G_net:
    """Generator: maps 128-d noise to a 96x96x3 image in [-1, 1]."""

    def __init__(self):
        self.w1 = tf.Variable(tf.truncated_normal(shape=[128, 6 * 6 * 512], stddev=0.02, dtype=tf.float32))
        self.b1 = tf.Variable(tf.zeros(shape=[6 * 6 * 512], dtype=tf.float32))
        # conv2d_transpose filters are [h, w, out_channels, in_channels].
        self.w2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 256, 512], stddev=0.02, dtype=tf.float32))
        self.b2 = tf.Variable(tf.zeros(shape=[256], dtype=tf.float32))   # -> 12x12x256
        self.w3 = tf.Variable(tf.truncated_normal(shape=[5, 5, 128, 256], stddev=0.02, dtype=tf.float32))
        self.b3 = tf.Variable(tf.zeros(shape=[128], dtype=tf.float32))   # -> 24x24x128
        self.w4 = tf.Variable(tf.truncated_normal(shape=[5, 5, 64, 128], stddev=0.02, dtype=tf.float32))
        self.b4 = tf.Variable(tf.zeros(shape=[64], dtype=tf.float32))    # -> 48x48x64
        self.w5 = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 64], stddev=0.02, dtype=tf.float32))
        self.b5 = tf.Variable(tf.zeros(shape=[3], dtype=tf.float32))     # -> 96x96x3

    def forward(self, x):
        """x: [batch, 128] noise. Returns [batch, 96, 96, 3] tanh images."""
        # GENERALIZED: use the dynamic batch size instead of hard-coding 100,
        # so any batch size works (the original only accepted batches of 100).
        batch = tf.shape(x)[0]
        y1 = tf.nn.relu(tf.layers.batch_normalization(tf.matmul(x, self.w1) + self.b1))
        y1 = tf.reshape(y1, [-1, 6, 6, 512])
        y2 = tf.nn.relu(tf.layers.batch_normalization(tf.nn.conv2d_transpose(
            y1, self.w2, tf.stack([batch, 12, 12, 256]),
            strides=[1, 2, 2, 1], padding="SAME") + self.b2))
        y3 = tf.nn.relu(tf.layers.batch_normalization(tf.nn.conv2d_transpose(
            y2, self.w3, tf.stack([batch, 24, 24, 128]),
            strides=[1, 2, 2, 1], padding="SAME") + self.b3))
        # BUG FIX: this hidden layer used tanh with no batch-norm; DCGAN
        # generators use ReLU + BN on all hidden layers, tanh only on output.
        y4 = tf.nn.relu(tf.layers.batch_normalization(tf.nn.conv2d_transpose(
            y3, self.w4, tf.stack([batch, 48, 48, 64]),
            strides=[1, 2, 2, 1], padding="SAME") + self.b4))
        # Output layer: tanh squashes pixels into [-1, 1].
        y5 = tf.nn.tanh(tf.nn.conv2d_transpose(
            y4, self.w5, tf.stack([batch, 96, 96, 3]),
            strides=[1, 2, 2, 1], padding="SAME") + self.b5)
        return y5

    def params(self):
        """All trainable variables of the generator (for var_list)."""
        return [self.w1, self.b1, self.w2, self.b2, self.w3, self.b3,
                self.w4, self.b4, self.w5, self.b5]


class Net:
    """Wires D and G together and builds the adversarial losses."""

    def __init__(self):
        self.fake_x = tf.placeholder(dtype=tf.float32, shape=[None, 128])        # noise z
        self.real_x = tf.placeholder(dtype=tf.float32, shape=[None, 96, 96, 3])  # real images
        self.fake_label = tf.placeholder(dtype=tf.float32, shape=[None, 1])
        self.real_label = tf.placeholder(dtype=tf.float32, shape=[None, 1])
        self.d_net = D_net()
        self.g_net = G_net()

    def forward(self):
        """Build G(z), D(G(z)) and D(x) graph nodes."""
        self.g_fake_out = self.g_net.forward(self.fake_x)
        self.d_fake_out = self.d_net.forward(self.g_fake_out)
        self.d_real_out = self.d_net.forward(self.real_x)

    def loss(self):
        """Sigmoid cross-entropy GAN losses on D's raw logits."""
        # D is fed 0-labels for fakes and 1-labels for reals by the trainer.
        self.d_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.fake_label, logits=self.d_fake_out))
        self.d_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.real_label, logits=self.d_real_out))
        self.d_loss = self.d_fake_loss + self.d_real_loss
        # G reuses the fake_label placeholder but is fed 1-labels, i.e. it is
        # trained to make D call its samples real (non-saturating GAN loss).
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.fake_label, logits=self.d_fake_out))

    def backward(self):
        """Adam optimizers, each restricted to its own net's variables."""
        # NOTE(review): tf.layers.batch_normalization moving averages are not
        # added as dependencies here (no UPDATE_OPS control dependency) —
        # inference-time BN statistics may be stale; confirm if checkpointing.
        self.d_optimizer = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(
            self.d_loss, var_list=self.d_net.params())
        self.g_optimizer = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(
            self.g_loss, var_list=self.g_net.params())


if __name__ == '__main__':
    net = Net()
    net.forward()
    net.loss()
    net.backward()
    mydataset = MyDataset()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for i in range(50000):
            # --- discriminator step: reals labelled 1, fakes labelled 0 ---
            real_x = mydataset.get_batch(100)
            real_label = np.ones([100, 1])
            fake_x = np.random.uniform(-1, 1, (100, 128))
            fake_label = np.zeros([100, 1])
            D_loss, _ = sess.run([net.d_loss, net.d_optimizer],
                                 feed_dict={net.fake_x: fake_x, net.fake_label: fake_label,
                                            net.real_x: real_x, net.real_label: real_label})
            # --- generator: trained twice per D step (common balancing trick),
            # with 1-labels so G is pushed to fool D ---
            fake_xs = np.random.uniform(-1, 1, (100, 128))
            fake_labels = np.ones([100, 1])
            sess.run([net.g_loss, net.g_optimizer],
                     feed_dict={net.fake_x: fake_xs, net.fake_label: fake_labels})
            G_loss, _ = sess.run([net.g_loss, net.g_optimizer],
                                 feed_dict={net.fake_x: fake_xs, net.fake_label: fake_labels})
            if i % 10 == 0:
                fake_xss = np.random.uniform(-1, 1, (100, 128))
                array = sess.run(net.g_fake_out, feed_dict={net.fake_x: fake_xss})  # [100,96,96,3]
                # BUG FIX: G's tanh output lies in [-1, 1]; plt.imshow clips
                # float images outside [0, 1], so rescale before displaying.
                img_array = (np.reshape(array[0], [96, 96, 3]) + 1.0) / 2.0
                plt.imshow(img_array)
                plt.pause(0.1)
                print("d_loss", D_loss)
                print("g_loss", G_loss)
CopyRight 2018-2019 实验室设备网 版权所有 |