Commit 148b32c

Fix indentation and add requirements.txt for convenience
herbiebradley committed Dec 28, 2019
1 parent 74b8416 commit 148b32c
Showing 3 changed files with 23 additions and 22 deletions.
3 changes: 1 addition & 2 deletions README.md
@@ -6,15 +6,14 @@ A Tensorflow implementation of [Unpaired Image-to-Image Translation using Cycle-
Requirements:

- Tensorflow 1.11
- Python 3.6

Thanks to the original authors' PyTorch implementation for inspiration: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix

Project Organization
------------

├── README.md
├── Makefile             <- Makefile with commands like `make data` or `make train`
├── requirements.txt     <- Use `pip install -r requirements.txt`
├── setup.py             <- makes project pip installable (pip install -e .) so src can be imported
├── LICENSE
└── src                  <- Source code for use in this project
2 changes: 2 additions & 0 deletions requirements.txt
@@ -0,0 +1,2 @@
+tensorflow==1.11.0
+numpy==1.16.2
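
A quick sanity check (a sketch, not part of this commit) to confirm that the pinned versions are the ones Python actually imports:

    import numpy as np
    import tensorflow as tf

    # Versions pinned in requirements.txt above.
    assert tf.__version__ == '1.11.0', tf.__version__
    assert np.__version__ == '1.16.2', np.__version__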
40 changes: 20 additions & 20 deletions src/train.py
@@ -27,26 +27,26 @@
 summary_writer = tf.contrib.summary.create_file_writer(log_dir, flush_millis=10000)
 for epoch in range(1, opt.epochs):
     start = time.time()
-    for train_step in range(batches_per_epoch):
-        # Record summaries every 100 train_steps; we multiply by 3 because there are 3 gradient updates per step.
-        with summary_writer.as_default(),
-        tf.contrib.summary.record_summaries_every_n_global_steps(opt.summary_freq * 3, global_step=global_step):
-            model.set_input(dataset.data)
-            model.optimize_parameters()
-            if opt.save_summaries:
-                # Summaries for Tensorboard:
-                tf.contrib.summary.scalar('loss/genA2B', model.genA2B_loss)
-                tf.contrib.summary.scalar('loss/genB2A', model.genB2A_loss)
-                tf.contrib.summary.scalar('loss/discA', model.discA_loss)
-                tf.contrib.summary.scalar('loss/discB', model.discB_loss)
-                tf.contrib.summary.scalar('loss/cyc', model.cyc_lossA + model.cyc_lossB)
-                tf.contrib.summary.scalar('loss/identity', model.id_lossA + model.id_lossB)
-                tf.contrib.summary.scalar('learning_rate', model.learning_rate)
-                tf.contrib.summary.image('A/generated', model.fakeA)
-                tf.contrib.summary.image('A/reconstructed', model.reconstructedA)
-                tf.contrib.summary.image('B/generated', model.fakeB)
-                tf.contrib.summary.image('B/reconstructed', model.reconstructedB)
-        print("Iteration complete")
+    for train_step in range(batches_per_epoch):
+        # Record summaries every 100 train_steps; we multiply by 3 because there are 3 gradient updates per step.
+        with summary_writer.as_default(), \
+                tf.contrib.summary.record_summaries_every_n_global_steps(opt.summary_freq * 3, global_step=global_step):
+            model.set_input(dataset.data)
+            model.optimize_parameters()
+            if opt.save_summaries:
+                # Summaries for Tensorboard:
+                tf.contrib.summary.scalar('loss/genA2B', model.genA2B_loss)
+                tf.contrib.summary.scalar('loss/genB2A', model.genB2A_loss)
+                tf.contrib.summary.scalar('loss/discA', model.discA_loss)
+                tf.contrib.summary.scalar('loss/discB', model.discB_loss)
+                tf.contrib.summary.scalar('loss/cyc', model.cyc_lossA + model.cyc_lossB)
+                tf.contrib.summary.scalar('loss/identity', model.id_lossA + model.id_lossB)
+                tf.contrib.summary.scalar('learning_rate', model.learning_rate)
+                tf.contrib.summary.image('A/generated', model.fakeA)
+                tf.contrib.summary.image('A/reconstructed', model.reconstructedA)
+                tf.contrib.summary.image('B/generated', model.fakeB)
+                tf.contrib.summary.image('B/reconstructed', model.reconstructedB)
+        print("Iteration complete")
     # Assign decayed learning rate:
     model.update_learning_rate(batches_per_epoch)
     # Checkpoint the model:
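
The substantive fix in this hunk is the trailing backslash: a `with` statement over two context managers cannot break at the comma without an explicit line continuation. A minimal runnable sketch of the corrected pattern, assuming TF 1.11 in eager mode (the log directory and step bookkeeping here are placeholders, not taken from this repo):

    import tensorflow as tf

    tf.enable_eager_execution()

    # Placeholder setup; the real script builds these from its options.
    global_step = tf.train.get_or_create_global_step()
    writer = tf.contrib.summary.create_file_writer('/tmp/logs', flush_millis=10000)

    # The backslash lets both context managers share one `with`; without
    # it, the line break after the comma is a SyntaxError.
    with writer.as_default(), \
            tf.contrib.summary.record_summaries_every_n_global_steps(100, global_step=global_step):
        global_step.assign_add(1)
        tf.contrib.summary.scalar('loss/example', 0.5)

An equivalent that avoids the continuation entirely is to nest the two `with` blocks, at the cost of one more indentation level.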
