Python Implementation
Simple GAN with TensorFlow/Keras
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np

def build_generator(latent_dim):
    # Maps a latent noise vector to a flattened 28x28 image (784 values in [-1, 1]).
    model = models.Sequential([
        layers.Dense(256, input_dim=latent_dim),
        layers.BatchNormalization(),
        layers.LeakyReLU(alpha=0.2),
        layers.Dense(512),
        layers.BatchNormalization(),
        layers.LeakyReLU(alpha=0.2),
        layers.Dense(1024),
        layers.BatchNormalization(),
        layers.LeakyReLU(alpha=0.2),
        layers.Dense(784, activation='tanh')
    ])
    return model
def build_discriminator(input_shape):
    model = models.Sequential([
        layers.Dense(512, input_dim=input_shape),
        layers.LeakyReLU(alpha=0.2),
        layers.Dropout(0.3),
        layers.Dense(256),
        layers.LeakyReLU(alpha=0.2),
        layers.Dropout(0.3),
        layers.Dense(1, activation='sigmoid')
    ])
    return model
latent_dim = 100
generator = build_generator(latent_dim)
discriminator = build_discriminator(784)

discriminator.compile(
    optimizer=tf.keras.optimizers.Adam(0.0002, 0.5),
    loss='binary_crossentropy',
    metrics=['accuracy']
)
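At this point the untrained models can be wired together on a batch of random noise to confirm that the tensor shapes line up. A minimal sanity check (the batch size of 16 is arbitrary):

# Sanity check: push noise through the generator and score the output.
noise = np.random.normal(0, 1, (16, latent_dim))
fake_images = generator.predict(noise)        # shape: (16, 784)
scores = discriminator.predict(fake_images)   # shape: (16, 1), values in (0, 1)
print(fake_images.shape, scores.shape)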
DCGAN for Image Generation
def build_dcgan_generator(latent_dim):
    # Upsamples a latent vector to a 28x28x1 image with transposed convolutions.
    model = models.Sequential([
        layers.Dense(7 * 7 * 256, input_dim=latent_dim),
        layers.Reshape((7, 7, 256)),
        layers.BatchNormalization(),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same'),  # 7x7
        layers.BatchNormalization(),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same'),   # 14x14
        layers.BatchNormalization(),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', activation='tanh')  # 28x28
    ])
    return model
def build_dcgan_discriminator(img_shape):
    model = models.Sequential([
        layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=img_shape),
        layers.LeakyReLU(alpha=0.2),
        layers.Dropout(0.3),
        layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Dropout(0.3),
        layers.Flatten(),
        layers.Dense(1, activation='sigmoid')
    ])
    return model
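Instantiating the DCGAN pair for 28x28 grayscale images (MNIST-style input is an assumption here) and compiling the discriminator mirrors the simple GAN setup above:

dcgan_generator = build_dcgan_generator(latent_dim)
dcgan_discriminator = build_dcgan_discriminator((28, 28, 1))

dcgan_discriminator.compile(
    optimizer=tf.keras.optimizers.Adam(0.0002, 0.5),
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# Quick shape check: one noise vector should yield one 28x28x1 image.
sample = dcgan_generator.predict(np.random.normal(0, 1, (1, latent_dim)))
print(sample.shape)  # (1, 28, 28, 1)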
GAN Training Loop
def train_gan(generator, discriminator, epochs, batch_size, latent_dim):
    # Stack the generator and the (frozen) discriminator into a combined model
    # that is used to train the generator.
    discriminator.trainable = False
    gan_input = layers.Input(shape=(latent_dim,))
    generated_image = generator(gan_input)
    gan_output = discriminator(generated_image)
    gan = models.Model(gan_input, gan_output)
    gan.compile(
        optimizer=tf.keras.optimizers.Adam(0.0002, 0.5),
        loss='binary_crossentropy'
    )

    for epoch in range(epochs):
        # Sample a batch of real images and generate an equal-sized batch of fakes.
        real_images = get_real_batch(batch_size)  # real samples, scaled to [-1, 1] to match the tanh output
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        fake_images = generator.predict(noise)

        real_labels = np.ones((batch_size, 1))
        fake_labels = np.zeros((batch_size, 1))

        # Train the discriminator on real and fake batches separately.
        discriminator.trainable = True
        d_loss_real = discriminator.train_on_batch(real_images, real_labels)
        d_loss_fake = discriminator.train_on_batch(fake_images, fake_labels)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        discriminator.trainable = False

        # Train the generator through the combined model: it tries to make the
        # frozen discriminator label its fakes as real.
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        g_loss = gan.train_on_batch(noise, real_labels)

        if epoch % 100 == 0:
            print(f'Epoch {epoch}, D Loss: {d_loss[0]:.4f}, G Loss: {g_loss:.4f}')
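The loop assumes a get_real_batch helper that returns a batch of real training images. One possible implementation against flattened MNIST digits, scaled to [-1, 1] to match the 784-unit tanh generator (both the dataset and the scaling are assumptions):

# Hypothetical data helper assumed by train_gan: flattened MNIST digits in [-1, 1].
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = (x_train.astype('float32') - 127.5) / 127.5   # scale pixels to [-1, 1]
x_train = x_train.reshape(-1, 784)                       # flatten 28x28 -> 784

def get_real_batch(batch_size):
    idx = np.random.randint(0, x_train.shape[0], batch_size)
    return x_train[idx]

# Example run (hyperparameters are illustrative, not tuned):
train_gan(generator, discriminator, epochs=10000, batch_size=64, latent_dim=latent_dim)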
Conditional GAN
def build_conditional_generator(latent_dim, num_classes):
    # Conditions generation on a class label by embedding the label and
    # concatenating it with the noise vector.
    noise_input = layers.Input(shape=(latent_dim,))
    label_input = layers.Input(shape=(1,))

    label_embedding = layers.Embedding(num_classes, 50)(label_input)
    label_embedding = layers.Flatten()(label_embedding)
    merged_input = layers.Concatenate()([noise_input, label_embedding])

    x = layers.Dense(256)(merged_input)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dense(512)(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    output = layers.Dense(784, activation='tanh')(x)

    model = models.Model([noise_input, label_input], output)
    return model
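The original code stops at the generator; a label-aware discriminator follows the same embed-and-concatenate pattern. The sketch below is one plausible counterpart (layer sizes and the 50-dimensional embedding simply mirror the generator; none of this is fixed by the original), followed by a small usage example:

def build_conditional_discriminator(num_classes):
    # Scores a flattened 28x28 image as real/fake, conditioned on its class label.
    image_input = layers.Input(shape=(784,))
    label_input = layers.Input(shape=(1,))

    label_embedding = layers.Embedding(num_classes, 50)(label_input)
    label_embedding = layers.Flatten()(label_embedding)
    merged_input = layers.Concatenate()([image_input, label_embedding])

    x = layers.Dense(512)(merged_input)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(256)(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    x = layers.Dropout(0.3)(x)
    output = layers.Dense(1, activation='sigmoid')(x)

    return models.Model([image_input, label_input], output)

# Example: generate one (untrained) sample conditioned on class 3.
cond_generator = build_conditional_generator(latent_dim, num_classes=10)
sample = cond_generator.predict([np.random.normal(0, 1, (1, latent_dim)), np.array([[3]])])
print(sample.shape)  # (1, 784)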