
Merge branch '142-csv_reader-nnhack' of github.com:NifTK/NiftyNet into 142-csv_reader-nnhack

Conflict changed
csudre committed Jul 3, 2019
2 parents 08654a1 + 3b226b2 commit 38961d9
Showing 217 changed files with 22,273 additions and 1,067 deletions.
12 changes: 6 additions & 6 deletions .gitlab-ci.yml
@@ -125,7 +125,7 @@ testjob:

- python net_classify.py train -c testing_data/test_classification.ini
- python net_classify.py inference -c testing_data/test_classification.ini
- python net_classify.py evaluation -c testing_data/test_classification.ini
#- python net_classify.py evaluation -c testing_data/test_classification.ini

- python net_regress.py train -c config/default_monomodal_regression.ini --batch_size=1 --name toynet --max_iter 10
- python net_regress.py inference -c config/default_monomodal_regression.ini --batch_size=7 --name toynet --spatial_window_size 84,84,84
@@ -197,7 +197,7 @@ testjob:

- coverage run -a --source . net_classify.py train -c testing_data/test_classification.ini
- coverage run -a --source . net_classify.py inference -c testing_data/test_classification.ini
- coverage run -a --source . net_classify.py evaluation -c testing_data/test_classification.ini
# - coverage run -a --source . net_classify.py evaluation -c testing_data/test_classification.ini

- coverage run -a --source . net_regress.py train -c config/default_monomodal_regression.ini --max_iter 10 --name toynet --batch_size=2
- coverage run -a --source . net_run.py train -a net_regress -c config/default_monomodal_regression.ini --max_iter 10 --name toynet --batch_size=2
@@ -408,10 +408,10 @@ pip-installer:

- net_classify train -c extensions/testing/test_classification.ini
- net_classify inference -c extensions/testing/test_classification.ini
- net_classify evaluation -c extensions/testing/test_classification.ini
#- net_classify evaluation -c extensions/testing/test_classification.ini
- net_run --app net_classify train -c extensions/testing/test_classification.ini
- net_run --app net_classify inference -c extensions/testing/test_classification.ini
- net_run --app net_classify evaluation -c extensions/testing/test_classification.ini
#- net_run --app net_classify evaluation -c extensions/testing/test_classification.ini

- net_regress train -c $niftynet_dir/config/default_monomodal_regression.ini --max_iter 10 --name toynet --batch_size=2
- net_regress inference -c $niftynet_dir/config/default_monomodal_regression.ini --name toynet --spatial_window_size 84,84,84 --batch_size 7
@@ -482,10 +482,10 @@ pip-installer:

- net_classify train -c extensions/testing/test_classification.ini
- net_classify inference -c extensions/testing/test_classification.ini
- net_classify evaluation -c extensions/testing/test_classification.ini
#- net_classify evaluation -c extensions/testing/test_classification.ini
- net_run --app net_classify train -c extensions/testing/test_classification.ini
- net_run --app net_classify inference -c extensions/testing/test_classification.ini
- net_run --app net_classify evaluation -c extensions/testing/test_classification.ini
#- net_run --app net_classify evaluation -c extensions/testing/test_classification.ini

- net_regress train -c $niftynet_dir/config/default_monomodal_regression.ini --max_iter 10 --name toynet --batch_size=2
- net_regress inference -c $niftynet_dir/config/default_monomodal_regression.ini --name toynet --spatial_window_size 84,84,84 --batch_size 7
@@ -0,0 +1,87 @@
import tensorflow as tf

from niftynet.application.segmentation_application import \
    SegmentationApplication
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.layer.loss_segmentation import LossFunction

SUPPORTED_INPUT = set(['image', 'label', 'weight'])


class DecayLearningRateApplication(SegmentationApplication):
    REQUIRED_CONFIG_SECTION = "SEGMENTATION"

    def __init__(self, net_param, action_param, is_training):
        SegmentationApplication.__init__(
            self, net_param, action_param, is_training)
        tf.logging.info('starting decay learning segmentation application')
        self.learning_rate = None
        self.current_lr = action_param.lr
        if self.action_param.validation_every_n > 0:
            raise NotImplementedError("validation process is not implemented "
                                      "in this demo.")

    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        data_dict = self.get_sampler()[0][0].pop_batch_op()
        image = tf.cast(data_dict['image'], tf.float32)
        net_out = self.net(image, self.is_training)

        if self.is_training:
            with tf.name_scope('Optimiser'):
                self.learning_rate = tf.placeholder(tf.float32, shape=[])
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.learning_rate)
            loss_func = LossFunction(
                n_class=self.segmentation_param.num_classes,
                loss_type=self.action_param.loss_type)
            data_loss = loss_func(
                prediction=net_out,
                ground_truth=data_dict.get('label', None),
                weight_map=data_dict.get('weight', None))

            loss = data_loss
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            grads = self.optimiser.compute_gradients(loss)
            # collecting gradient variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(
                var=data_loss, name='loss',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=self.learning_rate, name='lr',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=data_loss, name='loss',
                average_over_devices=True, summary_type='scalar',
                collection=TF_SUMMARIES)
        else:
            # converting logits into final output for
            # classification probabilities or argmax classification labels
            SegmentationApplication.connect_data_and_network(
                self, outputs_collector, gradients_collector)

    def set_iteration_update(self, iteration_message):
        """
        This function will be called by the application engine at each
        iteration.
        """
        current_iter = iteration_message.current_iter
        if iteration_message.is_training:
            # divide the learning rate by 1.02 every 5 training iterations
            if current_iter > 0 and current_iter % 5 == 0:
                self.current_lr = self.current_lr / 1.02
            iteration_message.data_feed_dict[self.is_validation] = False
        elif iteration_message.is_validation:
            iteration_message.data_feed_dict[self.is_validation] = True
        iteration_message.data_feed_dict[self.learning_rate] = self.current_lr
@@ -0,0 +1,85 @@
import tensorflow as tf

from niftynet.application.segmentation_application import \
    SegmentationApplication
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.layer.loss_segmentation import LossFunction

SUPPORTED_INPUT = set(['image', 'label', 'weight'])


class DecayLearningRateApplication(SegmentationApplication):
    REQUIRED_CONFIG_SECTION = "SEGMENTATION"

    def __init__(self, net_param, action_param, is_training):
        SegmentationApplication.__init__(
            self, net_param, action_param, is_training)
        tf.logging.info('starting decay learning segmentation application')
        self.learning_rate = None
        self.current_lr = action_param.lr
        if self.action_param.validation_every_n > 0:
            raise NotImplementedError("validation process is not implemented "
                                      "in this demo.")

    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        data_dict = self.get_sampler()[0][0].pop_batch_op()
        image = tf.cast(data_dict['image'], tf.float32)
        net_out = self.net(image, self.is_training)

        if self.is_training:
            with tf.name_scope('Optimiser'):
                self.learning_rate = tf.placeholder(tf.float32, shape=[])
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.learning_rate)
            loss_func = LossFunction(
                n_class=self.segmentation_param.num_classes,
                loss_type=self.action_param.loss_type)
            data_loss = loss_func(
                prediction=net_out,
                ground_truth=data_dict.get('label', None),
                weight_map=data_dict.get('weight', None))

            loss = data_loss
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            grads = self.optimiser.compute_gradients(loss)
            # collecting gradients variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(
                var=data_loss, name='dice_loss',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=self.learning_rate, name='lr',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=data_loss, name='dice_loss',
                average_over_devices=True, summary_type='scalar',
                collection=TF_SUMMARIES)
        else:
            # converting logits into final output for
            # classification probabilities or argmax classification labels
            SegmentationApplication.connect_data_and_network(
                self, outputs_collector, gradients_collector)

    def set_iteration_update(self, iteration_message):
        """
        This function will be called by the application engine at each
        iteration.
        """
        current_iter = iteration_message.current_iter
        if iteration_message.is_training:
            iteration_message.data_feed_dict[self.is_validation] = False
        elif iteration_message.is_validation:
            iteration_message.data_feed_dict[self.is_validation] = True
        iteration_message.data_feed_dict[self.learning_rate] = self.current_lr


29 changes: 29 additions & 0 deletions demos/Learning_Rate_Decay/README.md
@@ -0,0 +1,29 @@
# Learning rate decay application

This application implements a simple learning rate schedule of
"dividing the learning rate by 1.02 every 5 iterations" for segmentation applications.

The concept is general and could be used for other types of application. A brief demo is provided, which can be fully run from a Jupyter notebook, provided a working installation of NiftyNet exists on your system.

The core functionality is implemented by:

1) Adding a `self.learning_rate` placeholder and connecting it to the network
in the `connect_data_and_network` function

2) Adding a `self.current_lr` variable to keep track of the current learning rate

3) Overriding the default `set_iteration_update` function provided in `BaseApplication`
so that `self.current_lr` is changed according to the `current_iter`.

4) To feed the `self.current_lr` value to the network, the data feeding dictionary
is updated within the customised `set_iteration_update` function, by
```
iteration_message.data_feed_dict[self.learning_rate] = self.current_lr
```
`iteration_message.data_feed_dict` will be used in
`tf.Session.run(..., feed_dict=iteration_message.data_feed_dict)` by the engine
at each iteration.
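
Putting the four steps together, a minimal sketch of the overridden function,
mirroring the `decay_lr_application.py` file added alongside this README
(the 1.02 divisor and the 5-iteration interval are the values used there):

```
def set_iteration_update(self, iteration_message):
    current_iter = iteration_message.current_iter
    if iteration_message.is_training:
        # step 3: decay the tracked learning rate every 5 training iterations
        if current_iter > 0 and current_iter % 5 == 0:
            self.current_lr = self.current_lr / 1.02
        iteration_message.data_feed_dict[self.is_validation] = False
    elif iteration_message.is_validation:
        iteration_message.data_feed_dict[self.is_validation] = True
    # step 4: feed the tracked value into the learning rate placeholder
    iteration_message.data_feed_dict[self.learning_rate] = self.current_lr
```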


*This demo only supports NiftyNet cloned from [GitHub](https://github.com/NifTK/NiftyNet).*
Further demos and trained models can be found in the [NiftyNet model zoo](https://github.com/NifTK/NiftyNetModelZoo/blob/master/dense_vnet_abdominal_ct_model_zoo.md).
87 changes: 87 additions & 0 deletions demos/Learning_Rate_Decay/decay_lr_application.py
@@ -0,0 +1,87 @@
import tensorflow as tf

from niftynet.application.segmentation_application import \
    SegmentationApplication
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.layer.loss_segmentation import LossFunction

SUPPORTED_INPUT = set(['image', 'label', 'weight'])


class DecayLearningRateApplication(SegmentationApplication):
    REQUIRED_CONFIG_SECTION = "SEGMENTATION"

    def __init__(self, net_param, action_param, is_training):
        SegmentationApplication.__init__(
            self, net_param, action_param, is_training)
        tf.logging.info('starting decay learning segmentation application')
        self.learning_rate = None
        self.current_lr = action_param.lr
        if self.action_param.validation_every_n > 0:
            raise NotImplementedError("validation process is not implemented "
                                      "in this demo.")

    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        data_dict = self.get_sampler()[0][0].pop_batch_op()
        image = tf.cast(data_dict['image'], tf.float32)
        net_out = self.net(image, self.is_training)

        if self.is_training:
            with tf.name_scope('Optimiser'):
                self.learning_rate = tf.placeholder(tf.float32, shape=[])
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.learning_rate)
            loss_func = LossFunction(
                n_class=self.segmentation_param.num_classes,
                loss_type=self.action_param.loss_type)
            data_loss = loss_func(
                prediction=net_out,
                ground_truth=data_dict.get('label', None),
                weight_map=data_dict.get('weight', None))

            loss = data_loss
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            grads = self.optimiser.compute_gradients(loss)
            # collecting gradient variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(
                var=data_loss, name='loss',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=self.learning_rate, name='lr',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=data_loss, name='loss',
                average_over_devices=True, summary_type='scalar',
                collection=TF_SUMMARIES)
        else:
            # converting logits into final output for
            # classification probabilities or argmax classification labels
            SegmentationApplication.connect_data_and_network(
                self, outputs_collector, gradients_collector)

    def set_iteration_update(self, iteration_message):
        """
        This function will be called by the application engine at each
        iteration.
        """
        current_iter = iteration_message.current_iter
        if iteration_message.is_training:
            # divide the learning rate by 1.02 every 5 training iterations
            if current_iter > 0 and current_iter % 5 == 0:
                self.current_lr = self.current_lr / 1.02
            iteration_message.data_feed_dict[self.is_validation] = False
        elif iteration_message.is_validation:
            iteration_message.data_feed_dict[self.is_validation] = True
        iteration_message.data_feed_dict[self.learning_rate] = self.current_lr
63 changes: 63 additions & 0 deletions demos/Learning_Rate_Decay/learning_rate_demo_train_config.ini
@@ -0,0 +1,63 @@
############################ input configuration sections
[images] # Name this as you see fit
path_to_search = ./data/decathlon_hippocampus
filename_contains = img_hippocampus_
filename_not_contains = ._
spatial_window_size = (24, 24, 24)
interp_order = 3

[label]
path_to_search = ./data/decathlon_hippocampus
filename_contains = label_hippocampus_
filename_not_contains = ._
spatial_window_size = (24, 24, 24)
interp_order = 0

############################## system configuration sections
[SYSTEM]
cuda_devices = ""
num_threads = 6
num_gpus = 1
model_dir = ./models/model_multimodal_toy
queue_length = 20

[NETWORK]
name = highres3dnet
activation_function = prelu
batch_size = 1
decay = 0
reg_type = L2

# Volume level pre-processing
volume_padding_size = 0
# Normalisation
whitening = True
normalise_foreground_only = False

[TRAINING]
sample_per_volume = 1
optimiser = gradientdescent
# rotation_angle = (-10.0, 10.0)
# scaling_percentage = (-10.0, 10.0)
# random_flipping_axes= 1
lr = 0.0001
loss_type = CrossEntropy
starting_iter = 0
save_every_n = 100
max_iter = 500
max_checkpoints = 20

[INFERENCE]
border = 5
#inference_iter = 10
save_seg_dir = ./output/toy
output_interp_order = 0
spatial_window_size = (64, 64, 64)

############################ custom configuration sections
[SEGMENTATION]
image = images
label = label
output_prob = False
num_classes = 3
label_normalisation = False
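
With this configuration and the schedule implemented in `decay_lr_application.py` above (dividing by 1.02 every 5 training iterations), the learning rate trajectory can be previewed without running NiftyNet. A small standalone sketch, assuming the configured `lr = 0.0001` and `max_iter = 500`:

```
# Preview of the demo's learning rate schedule, assuming the values from the
# config above (lr = 0.0001, max_iter = 500) and the "divide by 1.02 every
# 5 iterations" rule from decay_lr_application.py.
lr = 0.0001
for current_iter in range(1, 501):
    if current_iter % 5 == 0:
        lr /= 1.02
print(lr)  # 100 decay steps: 0.0001 / 1.02 ** 100, roughly 1.38e-05
```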
