Update dicom-rt converter (#430)
* Update dicom-rt converter

* Fix base classes to be able to pass roi interpreted types

* Update score.py

* Add Changelog

* Update Changelog

* Fix config

* Fix config name

* Fix variable name

* Fix variable name

* Fix variable name

* Fix tests

* Fix inference

* Fix inference score.py tests
javier-alvarez committed Apr 13, 2021
1 parent 59d6995 commit f7c211a
Showing 14 changed files with 58 additions and 15 deletions.
4 changes: 3 additions & 1 deletion CHANGELOG.md
@@ -10,7 +10,9 @@ created.

## Upcoming

### Added
- ([#430](https://github.com/microsoft/InnerEye-DeepLearning/pull/430)) Update to InnerEye-DICOM-RT 1.0.1 to add: manufacturer,
  SoftwareVersions, Interpreter and ROIInterpretedTypes.
- ([#385](https://github.com/microsoft/InnerEye-DeepLearning/pull/385)) Add the ability to train a model on multiple
nodes in AzureML. Example: Add `--num_nodes=2` to the commandline arguments to train on 2 nodes.
- ([#366](https://github.com/microsoft/InnerEye-DeepLearning/pull/366)) and
9 changes: 9 additions & 0 deletions InnerEye/ML/config.py
@@ -433,6 +433,15 @@ class SegmentationModelBase(ModelConfigBase):
"output of the model for that class includes postprocessing to fill holes, "
"in the same order as in ground_truth_ids_display_names")

roi_interpreted_types: List[str] = param.List(None, class_=str, bounds=(1, None), instantiate=False,
allow_None=True,
doc="List of str with the ROI interpreted Types. Possible values "
"(None, CTV, ORGAN, EXTERNAL)")

interpreter: str = param.String("Default_Interpreter", doc="The interpreter that created the DICOM-RT file")

manufacturer: str = param.String("Default_Manufacturer", doc="The manufacturer that created the DICOM-RT file")

_inference_stride_size: Optional[TupleInt3] = IntTuple(None, length=3, allow_None=True,
doc="The stride size in the inference pipeline. "
"At most, this should be the output_size to "
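The three new `SegmentationModelBase` fields above are ordinary `param` attributes, so any derived config can set them alongside the existing post-processing options. A minimal sketch, not taken from this commit, that omits the dataset, architecture and normalization settings a real config also needs (the class name and structure ids are illustrative):

```python
from typing import Any

from InnerEye.ML.config import SegmentationModelBase


class ExampleDicomRtConfig(SegmentationModelBase):
    """Hypothetical config sketch showing the new DICOM-RT metadata fields."""

    def __init__(self, **kwargs: Any) -> None:
        fg_classes = ["spinalcord", "lung_r"]
        super().__init__(
            ground_truth_ids=fg_classes,
            ground_truth_ids_display_names=fg_classes,
            colours=[(255, 255, 255)] * len(fg_classes),
            fill_holes=[False] * len(fg_classes),
            # One entry per ground-truth structure; allowed values per the doc string above.
            roi_interpreted_types=["ORGAN"] * len(fg_classes),
            # Free-text metadata written into the generated DICOM-RT file.
            interpreter="ExampleInterpreter",
            manufacturer="ExampleManufacturer",
            **kwargs)
```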
1 change: 1 addition & 0 deletions InnerEye/ML/configs/segmentation/BasicModel2Epochs.py
@@ -27,6 +27,7 @@ def __init__(self, **kwargs: Any) -> None:
ground_truth_ids_display_names=fg_classes,
colours=[(255, 255, 255)] * len(fg_classes),
fill_holes=[False] * len(fg_classes),
roi_interpreted_types=["ORGAN"] * len(fg_classes),
mask_id="heart",
norm_method=PhotometricNormalizationMethod.CtWindow,
level=50,
1 change: 1 addition & 0 deletions InnerEye/ML/configs/segmentation/GbmBase.py
@@ -33,6 +33,7 @@ def __init__(self, **kwargs: Any) -> None:
ground_truth_ids_display_names=fg_classes,
colours=[(255, 255, 255)] * len(fg_classes),
fill_holes=[False] * len(fg_classes),
roi_interpreted_types=["ORGAN"] * len(fg_classes),
num_dataload_workers=8,
mask_id=None,
norm_method=PhotometricNormalizationMethod.MriWindow,
5 changes: 5 additions & 0 deletions InnerEye/ML/configs/segmentation/HeadAndNeckBase.py
@@ -39,6 +39,7 @@ def __init__(self,
ground_truth_ids_display_names: Optional[List[str]] = None,
colours: Optional[List[TupleInt3]] = None,
fill_holes: Optional[List[bool]] = None,
roi_interpreted_types: Optional[List[str]] = None,
class_weights: Optional[List[float]] = None,
slice_exclusion_rules: Optional[List[SliceExclusionRule]] = None,
summed_probability_rules: Optional[List[SummedProbabilityRule]] = None,
@@ -53,6 +54,8 @@ def __init__(self,
present then must be of the same length as ground_truth_ids.
:param fill_holes: Optional list of fill hole flags. If
present then must be of the same length as ground_truth_ids.
:param roi_interpreted_types: Optional list of roi_interpreted_types. If
present then must be of the same length as ground_truth_ids.
:param class_weights: Optional list of class weights. If
present then must be of the same length as ground_truth_ids + 1.
:param slice_exclusion_rules: Optional list of SliceExclusionRules.
@@ -65,6 +68,7 @@ def __init__(self,
num_structures = len(ground_truth_ids)
colours = colours or generate_random_colours_list(RANDOM_COLOUR_GENERATOR, num_structures)
fill_holes = fill_holes or [True] * num_structures
roi_interpreted_types = roi_interpreted_types or ["ORGAN"] * num_structures
ground_truth_ids_display_names = ground_truth_ids_display_names or [f"zz_{x}" for x in ground_truth_ids]
# The amount of GPU memory required increases with both the number of structures and the
# number of feature channels. The following is a sensible default to avoid out-of-memory,
@@ -115,6 +119,7 @@ def __init__(self,
largest_connected_component_foreground_classes=ground_truth_ids,
colours=colours,
fill_holes=fill_holes,
roi_interpreted_types=roi_interpreted_types,
class_weights=class_weights,
slice_exclusion_rules=slice_exclusion_rules,
summed_probability_rules=summed_probability_rules,
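As the fallback above shows, leaving `roi_interpreted_types` unset yields `"ORGAN"` for every structure, so a derived config only passes the list when some structures need a different interpreted type. A hedged sketch (the structure names are illustrative, and it assumes `ground_truth_ids` is the base constructor's first argument, which is not visible in this hunk):

```python
from InnerEye.ML.configs.segmentation.HeadAndNeckBase import HeadAndNeckBase


class ExampleHeadAndNeck(HeadAndNeckBase):
    """Hypothetical config: mark the external contour as EXTERNAL, the rest as ORGAN."""

    def __init__(self) -> None:
        ground_truth_ids = ["external", "parotid_l", "parotid_r"]
        super().__init__(
            ground_truth_ids=ground_truth_ids,
            # Must have the same length as ground_truth_ids, as the doc string above states.
            roi_interpreted_types=["EXTERNAL", "ORGAN", "ORGAN"])
```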
1 change: 1 addition & 0 deletions InnerEye/ML/configs/segmentation/HelloWorld.py
@@ -72,6 +72,7 @@ def __init__(self, **kwargs: Any) -> None:
# Post-processing - in this section we define our post processing configurations, in this case
# we are filling holes in the generated segmentation masks for all of the foreground classes.
fill_holes=[True] * len(fg_classes),
roi_interpreted_types=["ORGAN"] * len(fg_classes),

# Output - in this section we define settings that determine how our output looks like in this case
# we define the structure names and colours to use.
1 change: 1 addition & 0 deletions InnerEye/ML/configs/segmentation/Lung.py
@@ -37,6 +37,7 @@ def __init__(self, **kwargs: Any) -> None:
ground_truth_ids_display_names=fg_display_names,
colours=[(255, 255, 255)] * len(fg_classes),
fill_holes=[False] * len(fg_classes),
roi_interpreted_types=["ORGAN"] * len(fg_classes),
largest_connected_component_foreground_classes=["lung_r", "lung_l", "heart"],
num_dataload_workers=8,
norm_method=PhotometricNormalizationMethod.CtWindow,
5 changes: 5 additions & 0 deletions InnerEye/ML/configs/segmentation/ProstateBase.py
@@ -22,6 +22,7 @@ def __init__(self,
ground_truth_ids_display_names: Optional[List[str]] = None,
colours: Optional[List[TupleInt3]] = None,
fill_holes: Optional[List[bool]] = None,
roi_interpreted_types: Optional[List[str]] = None,
class_weights: Optional[List[float]] = None,
largest_connected_component_foreground_classes: Optional[List[str]] = None,
**kwargs: Any) -> None:
@@ -34,13 +35,16 @@ def __init__(self,
present then must be of the same length as ground_truth_ids.
:param fill_holes: Optional list of fill hole flags. If
present then must be of the same length as ground_truth_ids.
:param roi_interpreted_types: Optional list of roi_interpreted_types. If
present then must be of the same length as ground_truth_ids.
:param class_weights: Optional list of class weights. If
present then must be of the same length as ground_truth_ids + 1.
:param kwargs: Additional arguments that will be passed through to the SegmentationModelBase constructor.
"""
ground_truth_ids_display_names = ground_truth_ids_display_names or [f"zz_{name}" for name in ground_truth_ids]
colours = colours or [(255, 0, 0)] * len(ground_truth_ids)
fill_holes = fill_holes or [True] * len(ground_truth_ids)
roi_interpreted_types = roi_interpreted_types or ["ORGAN"] * len(ground_truth_ids)
class_weights = class_weights or equally_weighted_classes(ground_truth_ids, background_weight=0.02)
largest_connected_component_foreground_classes = largest_connected_component_foreground_classes or \
ground_truth_ids
@@ -55,6 +59,7 @@ def __init__(self,
ground_truth_ids_display_names=ground_truth_ids_display_names,
colours=colours,
fill_holes=fill_holes,
roi_interpreted_types=roi_interpreted_types,
image_channels=["ct"],
inference_batch_size=1,
inference_stride_size=(64, 256, 256),
1 change: 1 addition & 0 deletions InnerEye/ML/configs/unit_testing/passthrough_model.py
@@ -44,6 +44,7 @@ def __init__(self, **kwargs: Any) -> None:
ground_truth_ids_display_names=fg_display_names,
colours=generate_random_colours_list(RANDOM_COLOUR_GENERATOR, len(fg_classes)),
fill_holes=[False] * len(fg_classes),
roi_interpreted_types=["ORGAN"] * len(fg_classes),
inference_batch_size=1,
class_weights=equally_weighted_classes(fg_classes, background_weight=0.02),
feature_channels=[1],
3 changes: 2 additions & 1 deletion InnerEye/Scripts/submit_for_inference.py
@@ -174,7 +174,8 @@ def submit_for_inference(args: SubmitForInferenceConfig, azure_config: AzureConf
# is then just the file relative to the data_folder
"--data_folder", image.parent.name,
"--image_files", image.name,
"--use_dicom", str(args.use_dicom)],
"--use_dicom", str(args.use_dicom),
"--model_id", model_id],
conda_dependencies_files=conda_files,
)
run_config = create_run_config(azure_config, source_config, environment_name=python_environment_name)
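With the extra `--model_id` argument, the inference submission now tells the scoring script which AzureML model produced the segmentation, so it can be written into the DICOM-RT SoftwareVersions tag. A hedged sketch of the argument vector score.py would receive (paths and the model name are placeholders, not from this commit):

```python
# Illustrative only: the kind of argument list score.py sees after this change.
score_args = [
    "--data_folder", "dicom_series",     # folder holding the uploaded image data
    "--image_files", "series.zip",       # the zipped DICOM series passed to the run
    "--use_dicom", "True",
    "--model_id", "PassThroughModel:4",  # <model_name>:<version>, forwarded to the DICOM-RT writer
]
```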
1 change: 1 addition & 0 deletions Tests/ML/configs/DummyModel.py
@@ -36,6 +36,7 @@ def __init__(self, **kwargs: Any) -> None:
ground_truth_ids_display_names=self.fg_ids,
colours=[(255, 255, 255)] * len(self.fg_ids),
fill_holes=[False] * len(self.fg_ids),
roi_interpreted_types=["Organ"] * len(self.fg_ids),
mask_id="mask",
dataset_expected_spacing_xyz=(1.0, 1.0, 1.0),
norm_method=PhotometricNormalizationMethod.CtWindow,
24 changes: 15 additions & 9 deletions TestsOutsidePackage/test_score.py
@@ -27,7 +27,6 @@
extract_zipped_files_and_flatten, convert_zipped_dicom_to_nifti, \
convert_nifti_to_zipped_dicom_rt


test_image = full_ml_test_data_path("train_and_test_data") / "id1_channel1.nii.gz"
img_nii_path = full_ml_test_data_path("test_img.nii.gz")
# Expected zipped DICOM-RT file contents, just DEFAULT_RESULT_ZIP_DICOM_NAME without the final suffix.
@@ -119,7 +118,8 @@ def test_unpack_one_set_zip(zip_file_contents: List[Path], test_output_dirs: Out
@pytest.mark.parametrize("zip_file_contents", TEST_ZIP_FILE_PATHS_ALL2)
def test_unpack_two_distinct_sets_zip(zip_file_contents: List[Path], test_output_dirs: OutputFolderForTests) -> None:
"""
Test that a zip file containing two distinct sets of files in two folders, but possibly in a series of nested
folders, can be extracted into a folder containing only the files.
:param zip_file_contents: List of relative file paths to create and test.
@@ -233,7 +233,7 @@ def test_convert_nifti_to_zipped_dicom_rt(test_output_dirs: OutputFolderForTests
convert_zipped_dicom_to_nifti(zipped_dicom_series_path, reference_series_folder, nifti_filename)
model_config = PassThroughModel()
result_dst = convert_nifti_to_zipped_dicom_rt(HNSEGMENTATION_FILE, reference_series_folder, model_folder,
model_config, DEFAULT_RESULT_ZIP_DICOM_NAME)
model_config, DEFAULT_RESULT_ZIP_DICOM_NAME, model_id="test_model:1")
assert_zip_file_contents(result_dst, HN_DICOM_RT_ZIPPED, model_folder)


@@ -253,7 +253,8 @@ def test_score_image_dicom_two_inputs(test_output_dirs: OutputFolderForTests) ->
image_files=[str(zipped_dicom_series_path), str(zipped_dicom_series_path)],
result_image_name=HNSEGMENTATION_FILE.name,
use_gpu=False,
use_dicom=True)
use_dicom=True,
model_id="Dummy:1")

with pytest.raises(ValueError) as e:
score_image(score_pipeline_config)
@@ -278,7 +279,8 @@ def test_score_image_dicom_not_zip_input(test_output_dirs: OutputFolderForTests)
image_files=[str(test_file)],
result_image_name=HNSEGMENTATION_FILE.name,
use_gpu=False,
use_dicom=True)
use_dicom=True,
model_id="Dummy:1")

with pytest.raises(zipfile.BadZipFile):
score_image(score_pipeline_config)
@@ -309,7 +311,8 @@ def test_score_image_dicom_mock_all(test_output_dirs: OutputFolderForTests) -> N
image_files=[str(zipped_dicom_series_path)],
result_image_name=HNSEGMENTATION_FILE.name,
use_gpu=False,
use_dicom=True)
use_dicom=True,
model_id="Dummy:1")

with mock.patch('score.init_from_model_inference_json',
return_value=(mock_pipeline_base, model_config)) as mock_init_from_model_inference_json:
@@ -356,7 +359,8 @@ def test_score_image_dicom_mock_run_store(test_output_dirs: OutputFolderForTests
image_files=[str(zipped_dicom_series_path)],
result_image_name=HNSEGMENTATION_FILE.name,
use_gpu=False,
use_dicom=True)
use_dicom=True,
model_id="Dummy:1")

with mock.patch('score.run_inference',
return_value=mock_segmentation) as mock_run_inference:
@@ -398,7 +402,8 @@ def test_score_image_dicom_mock_run(test_output_dirs: OutputFolderForTests) -> N
image_files=[str(zipped_dicom_series_path)],
result_image_name=HNSEGMENTATION_FILE.name,
use_gpu=False,
use_dicom=True)
use_dicom=True,
model_id="Dummy:1")

image_with_header = io_util.load_nifti_image(HNSEGMENTATION_FILE)

@@ -438,7 +443,8 @@ def test_score_image_dicom_mock_none(test_output_dirs: OutputFolderForTests) ->
image_files=[str(zipped_dicom_series_path)],
result_image_name=HNSEGMENTATION_FILE.name,
use_gpu=False,
use_dicom=True)
use_dicom=True,
model_id="Dummy:1")

segmentation = score_image(score_pipeline_config)
assert_zip_file_contents(segmentation, HN_DICOM_RT_ZIPPED, model_folder)
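Every DICOM scoring test above now passes the new `model_id` field when building a `ScorePipelineConfig`. A hedged sketch of a standalone construction (the `data_folder`/`model_folder` fields and all paths are assumptions based on the truncated test code, not shown in this diff):

```python
from pathlib import Path

# Assumes score.py is importable, as it is in these tests.
from score import ScorePipelineConfig

score_pipeline_config = ScorePipelineConfig(
    data_folder=Path("data"),           # folder containing the zipped DICOM series (assumed field)
    model_folder="model",               # folder containing the model files (assumed field)
    image_files=["data/series.zip"],
    result_image_name="segmentation.nii.gz",
    use_gpu=False,
    use_dicom=True,
    model_id="Dummy:1")                 # new in this commit: <model_name>:<version>
```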
2 changes: 1 addition & 1 deletion environment.yml
@@ -20,7 +20,7 @@ dependencies:
- gitpython==3.1.7
- gputil==1.4.0
- h5py==2.10.0
- InnerEye-DICOM-RT==1.0.0
- InnerEye-DICOM-RT==1.0.1
- joblib==0.16.0
- jupyter==1.0.0
- jupyter-client==6.1.5
15 changes: 12 additions & 3 deletions score.py
@@ -49,6 +49,9 @@ class ScorePipelineConfig(GenericConfig):
"containing a set of DICOM files.")
result_zip_dicom_name: str = param.String(DEFAULT_RESULT_ZIP_DICOM_NAME,
doc="The name of the zipped DICOM-RT file if use_dicom set.")
model_id: str = param.String(allow_None=False,
doc="The AzureML model ID. This is added to the SoftwareVersions DICOM tag in the "
"DICOM-RT output")


def init_from_model_inference_json(model_folder: Path, use_gpu: bool = True) -> Tuple[FullImageInferencePipelineBase,
@@ -196,7 +199,7 @@ def convert_rgb_colour_to_hex(colour: TupleInt3) -> str:


def convert_nifti_to_zipped_dicom_rt(nifti_file: Path, reference_series: Path, scratch_folder: Path,
config: SegmentationModelBase, dicom_rt_zip_file_name: str) -> Path:
config: SegmentationModelBase, dicom_rt_zip_file_name: str, model_id: str) -> Path:
"""
Given a Nifti file and a reference DICOM series, create zip file containing a DICOM-RT file.
@@ -209,6 +212,7 @@ def convert_nifti_to_zipped_dicom_rt(nifti_file: Path, reference_series: Path, s
:param scratch_folder: Scratch folder to extract files into.
:param config: Model config.
:param dicom_rt_zip_file_name: Target DICOM-RT zip file name, ending in .dcm.zip.
:param model_id: The AzureML model ID <model_name>:<ID>
:return: Path to DICOM-RT file.
"""
dicom_rt_file_path = scratch_folder / Path(dicom_rt_zip_file_name).with_suffix("")
@@ -218,7 +222,12 @@ def convert_nifti_to_zipped_dicom_rt(nifti_file: Path, reference_series: Path, s
out_file=dicom_rt_file_path,
struct_names=config.ground_truth_ids_display_names,
struct_colors=[convert_rgb_colour_to_hex(rgb) for rgb in config.colours],
fill_holes=config.fill_holes)
fill_holes=config.fill_holes,
roi_interpreted_types=config.roi_interpreted_types,
manufacturer=config.manufacturer,
interpreter=config.interpreter,
modelId=model_id
)
# Log stdout, stderr from DICOM-RT conversion.
logging.debug("stdout: %s", stdout)
logging.debug("stderr: %s", stderr)
@@ -286,7 +295,7 @@ def score_image(args: ScorePipelineConfig) -> Path:

if args.use_dicom:
result_dst = convert_nifti_to_zipped_dicom_rt(result_dst, reference_series_folder, model_folder,
config, args.result_zip_dicom_name)
config, args.result_zip_dicom_name, args.model_id)

if not is_offline_run_context(run_context):
upload_file_name = args.result_zip_dicom_name if args.use_dicom else args.result_image_name
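`convert_nifti_to_zipped_dicom_rt` now forwards the config's ROI interpreted types, manufacturer and interpreter, plus the AzureML model id, to the InnerEye-DICOM-RT 1.0.1 converter. A hedged sketch of calling it directly, mirroring the test earlier in this commit (all paths and the model id are placeholders):

```python
from pathlib import Path

from InnerEye.ML.configs.unit_testing.passthrough_model import PassThroughModel
# Assumes score.py is importable, as it is in the tests above.
from score import convert_nifti_to_zipped_dicom_rt

# Illustrative only: the paths below are placeholders.
config = PassThroughModel()
dicom_rt_zip = convert_nifti_to_zipped_dicom_rt(
    nifti_file=Path("outputs/segmentation.nii.gz"),
    reference_series=Path("reference_dicom_series"),
    scratch_folder=Path("scratch"),
    config=config,
    dicom_rt_zip_file_name="segmentation.dcm.zip",
    model_id="PassThroughModel:1")  # ends up in the SoftwareVersions DICOM tag
```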
