Add save_summaries option, fix typos
herbiebradley committed Jun 30, 2019
1 parent cf1c1af commit 11664e5
Showing 6 changed files with 22 additions and 20 deletions.
1 change: 0 additions & 1 deletion README.md
@@ -15,7 +15,6 @@ Project Organization
 ├── Makefile <- Makefile with commands like `make data` or `make train`
 ├── setup.py <- makes project pip installable (pip install -e .) so src can be imported
 ├── LICENSE
-├── eval_cityscapes <- This contains scripts for getting the experiment results.
 └── src <- Source code for use in this project
    ├── __init__.py <- Makes src a Python module
2 changes: 1 addition & 1 deletion src/data/dataset.py
@@ -54,7 +54,7 @@ def load_train_data(self):
         if self.opt.gpu_id != -1:
             train_datasetA = train_datasetA.apply(tf.contrib.data.prefetch_to_device(self.gpu_id, buffer_size=1))
             train_datasetB = train_datasetB.apply(tf.contrib.data.prefetch_to_device(self.gpu_id, buffer_size=1))
-        # Create a tf.data.Iterator from the Datasets:
+        # Create a tf.data.Iterator from each Dataset:
         return iter(train_datasetA), iter(train_datasetB)
 
     def load_test_data(self):
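For context, the hunk above follows the TF 1.x eager-execution idiom used throughout this repo: build a tf.data.Dataset, stage batches onto the GPU with prefetch_to_device, then iterate it directly. A minimal self-contained sketch of that pattern (the toy dataset and hard-coded device string are illustrative, not from this repo, and a GPU is assumed to be available):

import tensorflow as tf

tf.enable_eager_execution()

# Toy dataset standing in for the image pipelines built in dataset.py:
dataset = tf.data.Dataset.from_tensor_slices(tf.zeros([8, 256, 256, 3])).batch(1)
# Stage batches onto the GPU ahead of the compute; buffer_size=1 keeps one batch ready.
dataset = dataset.apply(tf.contrib.data.prefetch_to_device("/gpu:0", buffer_size=1))
# In eager mode the dataset is directly iterable, as in load_train_data above:
for batch in iter(dataset):
    print(batch.shape)  # (1, 256, 256, 3)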
1 change: 0 additions & 1 deletion src/models/networks.py
@@ -233,5 +233,4 @@ def call(self, inputs):
         x = self.leaky(x)
 
         x = self.conv5(x)
-        #x = tf.nn.sigmoid(x) # use_sigmoid = not use_lsgan TODO
         return x
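The deleted line was a leftover from the least-squares GAN option: with an LSGAN objective the discriminator's final conv output is used raw, and a sigmoid would only be needed for the vanilla cross-entropy loss. A hedged sketch of that distinction (function and argument names are illustrative, not this repo's API):

import tensorflow as tf

def discriminator_loss(real_out, fake_out, use_lsgan=True):
    if use_lsgan:
        # LSGAN: least-squares loss on raw discriminator outputs, so D ends without a sigmoid.
        return 0.5 * (tf.reduce_mean(tf.squared_difference(real_out, 1.0)) +
                      tf.reduce_mean(tf.square(fake_out)))
    # Vanilla GAN: sigmoid cross-entropy, which applies the sigmoid to the logits internally.
    real = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_out), logits=real_out)
    fake = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_out), logits=fake_out)
    return 0.5 * (tf.reduce_mean(real) + tf.reduce_mean(fake))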
2 changes: 1 addition & 1 deletion src/test.py
@@ -10,7 +10,7 @@
 tf.enable_eager_execution()
 """
 Run this module for testing.
-Required args: --data_dir, --save_dir
+Required args: --data_dir, --save_dir, --results_dir
 """
 if __name__ == "__main__":
     opt = Options().parse(training=False)
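With this fix the docstring matches the options parser; a hypothetical test invocation (paths are illustrative):

python src/test.py --data_dir datasets/horse2zebra --save_dir checkpoints --results_dir results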
33 changes: 18 additions & 15 deletions src/train.py
@@ -10,14 +10,15 @@
 tf.enable_eager_execution()
 """
 Run this module for training.
-Required args: --data_dir, --save_dir, --results_dir
+Required args: --data_dir, --save_dir
 """
 if __name__ == "__main__":
     opt = Options().parse(training=True)
     dataset = Dataset(opt)
     model = CycleGANModel(opt)
 
     device = ("/gpu:" + str(opt.gpu_id)) if opt.gpu_id != -1 else "/cpu:0"
+
     with tf.device(device):
         global_step = model.global_step
         batches_per_epoch = dataset.get_batches_per_epoch(opt)
@@ -26,24 +27,26 @@
         summary_writer = tf.contrib.summary.create_file_writer(log_dir, flush_millis=10000)
         for epoch in range(1, opt.epochs):
             start = time.time()
-            with summary_writer.as_default():
             for train_step in range(batches_per_epoch):
                 # Record summaries every 100 train_steps; we multiply by 3 because there are 3 gradient updates per step.
-                with tf.contrib.summary.record_summaries_every_n_global_steps(opt.summary_freq * 3, global_step=global_step):
+                with summary_writer.as_default(), \
+                        tf.contrib.summary.record_summaries_every_n_global_steps(opt.summary_freq * 3, global_step=global_step):
                     model.set_input(dataset.data)
                     model.optimize_parameters()
-                    # Summaries for Tensorboard:
-                    tf.contrib.summary.scalar('loss/genA2B', model.genA2B_loss)
-                    tf.contrib.summary.scalar('loss/genB2A', model.genB2A_loss)
-                    tf.contrib.summary.scalar('loss/discA', model.discA_loss)
-                    tf.contrib.summary.scalar('loss/discB', model.discB_loss)
-                    tf.contrib.summary.scalar('loss/cyc', model.cyc_lossA + model.cyc_lossB)
-                    tf.contrib.summary.scalar('loss/identity', model.id_lossA + model.id_lossB)
-                    tf.contrib.summary.scalar('learning_rate', model.learning_rate)
-                    tf.contrib.summary.image('A/generated', model.fakeA)
-                    tf.contrib.summary.image('A/reconstructed', model.reconstructedA)
-                    tf.contrib.summary.image('B/generated', model.fakeB)
-                    tf.contrib.summary.image('B/reconstructed', model.reconstructedB)
+                    if opt.save_summaries:
+                        # Summaries for Tensorboard:
+                        tf.contrib.summary.scalar('loss/genA2B', model.genA2B_loss)
+                        tf.contrib.summary.scalar('loss/genB2A', model.genB2A_loss)
+                        tf.contrib.summary.scalar('loss/discA', model.discA_loss)
+                        tf.contrib.summary.scalar('loss/discB', model.discB_loss)
+                        tf.contrib.summary.scalar('loss/cyc', model.cyc_lossA + model.cyc_lossB)
+                        tf.contrib.summary.scalar('loss/identity', model.id_lossA + model.id_lossB)
+                        tf.contrib.summary.scalar('learning_rate', model.learning_rate)
+                        tf.contrib.summary.image('A/generated', model.fakeA)
+                        tf.contrib.summary.image('A/reconstructed', model.reconstructedA)
+                        tf.contrib.summary.image('B/generated', model.fakeB)
+                        tf.contrib.summary.image('B/reconstructed', model.reconstructedB)
+                print("Iteration complete")
             # Assign decayed learning rate:
             model.update_learning_rate(batches_per_epoch)
             # Checkpoint the model:
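A minimal sketch of the summary gating used above, isolated from the model (TF 1.x contrib summaries; the log directory and scalar value are placeholders). Note the recording condition tests global_step, which is why the code above multiplies summary_freq by 3 when the step advances three times per iteration:

import tensorflow as tf

tf.enable_eager_execution()

global_step = tf.train.get_or_create_global_step()
writer = tf.contrib.summary.create_file_writer("/tmp/logs", flush_millis=10000)
with writer.as_default(), \
        tf.contrib.summary.record_summaries_every_n_global_steps(100, global_step=global_step):
    for step in range(1000):
        global_step.assign_add(1)
        # Recorded only on steps where global_step % 100 == 0:
        tf.contrib.summary.scalar('loss/example', 0.5)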
3 changes: 2 additions & 1 deletion src/utils/options.py
@@ -19,7 +19,7 @@ def __init__(self):
         parser.add_argument('--instance_norm', action='store_false', help='if true, uses instance normalisation after each conv layer in D and G')
         parser.add_argument('--init_scale', type=float, default=0.02, help='stddev for weight initialisation; small variance helps prevent colour inversion.')
         parser.add_argument('--gen_skip', action='store_true', help='if true, use skip connection from first residual block to last in generator')
-        parser.add_argument('--resize_conv', action='store_true', help='if true, replace conv2dtranspose in generator with upsample -> conv2d')
+        parser.add_argument('--resize_conv', action='store_false', help='if true, replace conv2dtranspose in generator with upsample -> conv2d')
         parser.add_argument('--use_dropout', action='store_true', help='if true, use dropout for the generator')
         parser.add_argument('--dropout_prob', type=float, default=0.5, help='dropout probability for all layers in generator')
         # dataset options
@@ -56,6 +56,7 @@ def _get_train_options(self, parser):
         parser.add_argument('--training', action='store_false', help='boolean for training/testing')
         parser.add_argument('--load_checkpoint', action='store_false', help='if true, loads latest checkpoint')
         parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
+        parser.add_argument('--save_summaries', action='store_false', help='if true, stores tensorboard summaries. Turn off to speed up training significantly')
         parser.add_argument('--summary_freq', type=int, default=100, help='frequency of saving tensorboard summaries in training steps')
         parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train the model; learning rate decays to 0 by epoch 200')
         parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
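Note the action='store_false' pattern used by several flags here, including the new --save_summaries: the option defaults to True and supplying the flag switches it off, so "turn off to speed up training" means passing --save_summaries on the command line. A small argparse demonstration of that behaviour:

import argparse

parser = argparse.ArgumentParser()
# Same pattern as options.py: defaults to True; passing the flag sets it to False.
parser.add_argument('--save_summaries', action='store_false')

print(parser.parse_args([]).save_summaries)                    # True
print(parser.parse_args(['--save_summaries']).save_summaries)  # False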
