[D] GAN Immediate Mode Collapse

I’m not even sure mode collapse is the correct term: neither the generator nor the discriminator learns anything when I pass both true and false samples to the discriminator in a single batch. If instead I show the discriminator only true or only false samples, the loss drops. I’ve seen mode collapse after a few epochs when training other GANs, but never complete stagnation right out of the gate. What might be going wrong here?

import numpy as np
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import LSTM, Dense, Conv1D, LeakyReLU, BatchNormalization
from keras.optimizers import SGD

def generator():
    neurons = 121
    model = Sequential()
    # Input shape [batch_size, timestep, input_dim]
    model.add(LSTM(neurons, activation='tanh', recurrent_activation='hard_sigmoid',
                   kernel_initializer='RandomUniform', return_sequences=True))
    model.add(LSTM(neurons, activation='tanh', recurrent_activation='hard_sigmoid',
                   kernel_initializer='RandomUniform', return_sequences=True))
    model.add(Dense(1, activation=None))
    return model

def discriminator():
    model = Sequential()
    # Input shape [batch_size, steps, channels]
    model.add(Conv1D(32, 4, strides=2, activation=None, padding='same', input_shape=(None, 1)))
    model.add(LeakyReLU())
    model.add(Conv1D(64, 4, strides=2, activation=None, padding='same'))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Conv1D(128, 4, strides=2, activation=None, padding='same'))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    return model

def generator_containing_discriminator(g, d):
    # Stack generator and (frozen) discriminator for the generator update
    model = Sequential()
    model.add(g)
    d.trainable = False
    model.add(d)
    return model

def g_loss_function(y_true, y_pred):
    l_bce = keras.losses.binary_crossentropy(y_true, y_pred)
    # The difference of squares can be negative, so K.sqrt can yield NaN here
    l_norm = K.sqrt(K.square(y_true) - K.square(y_pred))
    return l_bce + l_norm

def train(X, Y, BATCH_SIZE):
    d_optim = SGD(lr=0.002)
    g_optim = SGD(lr=0.00004)
    g = generator()
    d = discriminator()
    gan = generator_containing_discriminator(g, d)
    g.compile(loss=g_loss_function, optimizer=g_optim)
    # Note: the stacked model is compiled with a fresh default-lr SGD, not g_optim
    gan.compile(loss='binary_crossentropy', optimizer='SGD')
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_optim)
    num_batches = int(X.shape[0] / float(BATCH_SIZE))
    for epoch in range(1000):
        for index in range(1, num_batches):
            # Prepare data
            startIdx = (index - 1) * BATCH_SIZE
            endIdx = index * BATCH_SIZE
            inputs = X[startIdx:endIdx, :]
            targets = Y[startIdx:endIdx]
            # Generate predictions
            Y_pred = g.predict(inputs)
            # Train the discriminator on one mixed batch of real + fake samples
            targets = targets.reshape(BATCH_SIZE, 1, 1)
            truth = np.vstack((np.ones((BATCH_SIZE, 1, 1)), np.zeros((BATCH_SIZE, 1, 1))))
            d_loss = d.train_on_batch(np.vstack((targets, Y_pred)), truth)
            d.trainable = False
            # Train the generator through the frozen discriminator
            g_truth = np.ones((BATCH_SIZE, 1, 1))
            g_loss = gan.train_on_batch(inputs, g_truth)
            d.trainable = True
        print('Epoch {} | d_loss: {} | g_loss: {}'.format(epoch, d_loss, g_loss))
        g.save_weights('generator', True)
        d.save_weights('discriminator', True)
    return d, g, gan
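One property of the custom loss above worth keeping in mind: K.square(y_true) - K.square(y_pred) can be negative, and K.sqrt of a negative tensor yields NaN, which can stall training from the very first batches. Below is a minimal sketch of a variant whose square-root argument stays non-negative, assuming the same Keras setup as above; the _safe suffix is just an illustrative name, not part of the original code.

import keras
from keras import backend as K

# Sketch: an L2-style distance between target and prediction. The sqrt
# argument is non-negative by construction, and K.epsilon() keeps the
# gradient finite when the error is exactly zero.
def g_loss_function_safe(y_true, y_pred):
    l_bce = keras.losses.binary_crossentropy(y_true, y_pred)
    l_norm = K.sqrt(K.square(y_true - y_pred) + K.epsilon())
    return l_bce + l_norm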
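The observation that the loss does drop when the discriminator sees only true or only false samples is also consistent with the BatchNormalization layers computing statistics over half-real, half-fake batches. Below is a minimal sketch of the discriminator step split into two pure batches, assuming g, d, inputs, targets, and BATCH_SIZE as defined in train; the helper name train_d_step is hypothetical.

import numpy as np

# Sketch: one discriminator update with real and fake samples kept in
# separate batches, so batch statistics are never computed over a mix.
def train_d_step(d, g, inputs, targets, batch_size):
    y_pred = g.predict(inputs)                # fake samples from the generator
    real = targets.reshape(batch_size, 1, 1)  # real samples, same shape as y_pred
    d_loss_real = d.train_on_batch(real, np.ones((batch_size, 1, 1)))
    d_loss_fake = d.train_on_batch(y_pred, np.zeros((batch_size, 1, 1)))
    return 0.5 * (d_loss_real + d_loss_fake)

Inside train, this would replace the single d.train_on_batch call on the stacked np.vstack arrays.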

submitted by /u/Cranial_Vault
