From 9bea453ba607050cb08dfd599c1c7166d70843ee Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sat, 5 Feb 2022 14:11:38 +0200 Subject: [PATCH 1/5] Add `black` to the dev-dependencies --- poetry.lock | 251 +++++++++++++++++++++++++++++++++++++++++-------- pyproject.toml | 1 + 2 files changed, 213 insertions(+), 39 deletions(-) diff --git a/poetry.lock b/poetry.lock index 009f267..8b34aab 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,3 +1,46 @@ +[[package]] +name = "black" +version = "22.1.0" +description = "The uncompromising code formatter." +category = "dev" +optional = false +python-versions = ">=3.6.2" + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = ">=1.1.0" +typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "click" +version = "8.0.3" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[[package]] +name = "colorama" +version = "0.4.4" +description = "Cross-platform colored terminal text." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + [[package]] name = "dicom-numpy" version = "0.6.2" @@ -14,6 +57,23 @@ pydicom = ">=1.0" dev = ["check-manifest", "sphinx", "sphinx-autobuild"] test = ["coverage", "pytest"] +[[package]] +name = "importlib-metadata" +version = "4.10.1" +description = "Read metadata from Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] +perf = ["ipython"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] + [[package]] name = "itk" version = "5.2.1.post1" @@ -97,6 +157,14 @@ python-versions = "*" [package.dependencies] itk-filtering = "5.2.1.post1" +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." +category = "dev" +optional = false +python-versions = "*" + [[package]] name = "nibabel" version = "3.2.1" @@ -120,14 +188,6 @@ spm = ["scipy"] style = ["flake8"] test = ["coverage", "pytest (!=5.3.4)", "pytest-cov", "pytest-doctestplus"] -[[package]] -name = "numpy" -version = "1.21.5" -description = "NumPy is the fundamental package for array computing with Python." 
-category = "main" -optional = false -python-versions = ">=3.7,<3.11" - [[package]] name = "numpy" version = "1.22.2" @@ -147,6 +207,26 @@ python-versions = ">=3.6" [package.dependencies] pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" +[[package]] +name = "pathspec" +version = "0.9.0" +description = "Utility library for gitignore style pattern matching of file paths." +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[[package]] +name = "platformdirs" +version = "2.4.1" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] +test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] + [[package]] name = "pydicom" version = "2.2.2" @@ -166,16 +246,89 @@ python-versions = ">=3.6" [package.extras] diagrams = ["jinja2", "railroad-diagrams"] +[[package]] +name = "tomli" +version = "2.0.0" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "typed-ast" +version = "1.5.2" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "typing-extensions" +version = "4.0.1" +description = "Backported and Experimental Type Hints for Python 3.6+" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "zipp" +version = "3.7.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] + [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "04d8669248a8a91019814131796762dfd40a60badc905e11cd37b9709d10fef0" +content-hash = "b3ce8b4e1f1d1c11e62f2469305f7630bba53b5872c010b65026e001c69d3eca" [metadata.files] +black = [ + {file = "black-22.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1297c63b9e1b96a3d0da2d85d11cd9bf8664251fd69ddac068b98dc4f34f73b6"}, + {file = "black-22.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2ff96450d3ad9ea499fc4c60e425a1439c2120cbbc1ab959ff20f7c76ec7e866"}, + {file = "black-22.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e21e1f1efa65a50e3960edd068b6ae6d64ad6235bd8bfea116a03b21836af71"}, + {file = "black-22.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f69158a7d120fd641d1fa9a921d898e20d52e44a74a6fbbcc570a62a6bc8ab"}, + {file = "black-22.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:228b5ae2c8e3d6227e4bde5920d2fc66cc3400fde7bcc74f480cb07ef0b570d5"}, + {file = "black-22.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b1a5ed73ab4c482208d20434f700d514f66ffe2840f63a6252ecc43a9bc77e8a"}, + {file = "black-22.1.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35944b7100af4a985abfcaa860b06af15590deb1f392f06c8683b4381e8eeaf0"}, + {file = "black-22.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7835fee5238fc0a0baf6c9268fb816b5f5cd9b8793423a75e8cd663c48d073ba"}, + {file = 
"black-22.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dae63f2dbf82882fa3b2a3c49c32bffe144970a573cd68d247af6560fc493ae1"}, + {file = "black-22.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fa1db02410b1924b6749c245ab38d30621564e658297484952f3d8a39fce7e8"}, + {file = "black-22.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c8226f50b8c34a14608b848dc23a46e5d08397d009446353dad45e04af0c8e28"}, + {file = "black-22.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2d6f331c02f0f40aa51a22e479c8209d37fcd520c77721c034517d44eecf5912"}, + {file = "black-22.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:742ce9af3086e5bd07e58c8feb09dbb2b047b7f566eb5f5bc63fd455814979f3"}, + {file = "black-22.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fdb8754b453fb15fad3f72cd9cad3e16776f0964d67cf30ebcbf10327a3777a3"}, + {file = "black-22.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5660feab44c2e3cb24b2419b998846cbb01c23c7fe645fee45087efa3da2d61"}, + {file = "black-22.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:6f2f01381f91c1efb1451998bd65a129b3ed6f64f79663a55fe0e9b74a5f81fd"}, + {file = "black-22.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:efbadd9b52c060a8fc3b9658744091cb33c31f830b3f074422ed27bad2b18e8f"}, + {file = "black-22.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8871fcb4b447206904932b54b567923e5be802b9b19b744fdff092bd2f3118d0"}, + {file = "black-22.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccad888050f5393f0d6029deea2a33e5ae371fd182a697313bdbd835d3edaf9c"}, + {file = "black-22.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07e5c049442d7ca1a2fc273c79d1aecbbf1bc858f62e8184abe1ad175c4f7cc2"}, + {file = "black-22.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:373922fc66676133ddc3e754e4509196a8c392fec3f5ca4486673e685a421321"}, + {file = "black-22.1.0-py3-none-any.whl", hash = "sha256:3524739d76b6b3ed1132422bf9d82123cd1705086723bc3e235ca39fd21c667d"}, + {file = "black-22.1.0.tar.gz", hash = "sha256:a7c0192d35635f6fc1174be575cb7915e92e5dd629ee79fdaf0dcfa41a80afb5"}, +] +click = [ + {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, + {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, +] +colorama = [ + {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, + {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, +] dicom-numpy = [ {file = "dicom_numpy-0.6.2-py2.py3-none-any.whl", hash = "sha256:361c8dfc52d625bf3344e5c2745e9c928d263999a4c094fe285d9fe461895ea9"}, {file = "dicom_numpy-0.6.2.tar.gz", hash = "sha256:24b993083368efb868ffe5edcab054db5c11f0587a218a6b6492fde14a87acd9"}, ] +importlib-metadata = [ + {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, + {file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, +] itk = [ {file = "itk-5.2.1.post1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d18723ca6791fc5d9c7498e03d73929df56acffd9290ed8f61a24f25a138951e"}, {file = "itk-5.2.1.post1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:fddd62554da37254eb8de4cd2660a8d9e601af88df50e7017202ed269c20a584"}, @@ -309,41 +462,15 @@ 
itk-segmentation = [ {file = "itk_segmentation-5.2.1.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2e7d4953bd4e2b2d36539944c8bef33e50266398249e8af3c4a5b31a291f72b"}, {file = "itk_segmentation-5.2.1.post1-cp39-cp39-win_amd64.whl", hash = "sha256:136b995ee4f65096792c8be41c696d1dd384364f89cc84a0d8fb003f12ab6b9e"}, ] +mypy-extensions = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] nibabel = [ {file = "nibabel-3.2.1-py3-none-any.whl", hash = "sha256:7e26cbf60eae8668785fa970294f05f767cefc5538b9e22aa388a07f62c54ebc"}, {file = "nibabel-3.2.1.tar.gz", hash = "sha256:4d2ff9426b740011a1c916b54fc25da9348282e727eaa2ea163f42e00f1fc29e"}, ] numpy = [ - {file = "numpy-1.21.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:301e408a052fdcda5cdcf03021ebafc3c6ea093021bf9d1aa47c54d48bdad166"}, - {file = "numpy-1.21.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7e8f6216f180f3fd4efb73de5d1eaefb5f5a1ee5b645c67333033e39440e63a"}, - {file = "numpy-1.21.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc7a7d7b0ed72589fd8b8486b9b42a564f10b8762be8bd4d9df94b807af4a089"}, - {file = "numpy-1.21.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58ca1d7c8aef6e996112d0ce873ac9dfa1eaf4a1196b4ff7ff73880a09923ba7"}, - {file = "numpy-1.21.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4b2fb01f1b4ddbe2453468ea0719f4dbb1f5caa712c8b21bb3dd1480cd30d9"}, - {file = "numpy-1.21.5-cp310-cp310-win_amd64.whl", hash = "sha256:cc1b30205d138d1005adb52087ff45708febbef0e420386f58664f984ef56954"}, - {file = "numpy-1.21.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:08de8472d9f7571f9d51b27b75e827f5296295fa78817032e84464be8bb905bc"}, - {file = "numpy-1.21.5-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4fe6a006557b87b352c04596a6e3f12a57d6e5f401d804947bd3188e6b0e0e76"}, - {file = "numpy-1.21.5-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3d893b0871322eaa2f8c7072cdb552d8e2b27645b7875a70833c31e9274d4611"}, - {file = "numpy-1.21.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:341dddcfe3b7b6427a28a27baa59af5ad51baa59bfec3264f1ab287aa3b30b13"}, - {file = "numpy-1.21.5-cp37-cp37m-win32.whl", hash = "sha256:ca9c23848292c6fe0a19d212790e62f398fd9609aaa838859be8459bfbe558aa"}, - {file = "numpy-1.21.5-cp37-cp37m-win_amd64.whl", hash = "sha256:025b497014bc33fc23897859350f284323f32a2fff7654697f5a5fc2a19e9939"}, - {file = "numpy-1.21.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a5098df115340fb17fc93867317a947e1dcd978c3888c5ddb118366095851f8"}, - {file = "numpy-1.21.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:311283acf880cfcc20369201bd75da907909afc4666966c7895cbed6f9d2c640"}, - {file = "numpy-1.21.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b545ebadaa2b878c8630e5bcdb97fc4096e779f335fc0f943547c1c91540c815"}, - {file = "numpy-1.21.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c5562bcc1a9b61960fc8950ade44d00e3de28f891af0acc96307c73613d18f6e"}, - {file = "numpy-1.21.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eed2afaa97ec33b4411995be12f8bdb95c87984eaa28d76cf628970c8a2d689a"}, - {file = 
"numpy-1.21.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61bada43d494515d5b122f4532af226fdb5ee08fe5b5918b111279843dc6836a"}, - {file = "numpy-1.21.5-cp38-cp38-win32.whl", hash = "sha256:7b9d6b14fc9a4864b08d1ba57d732b248f0e482c7b2ff55c313137e3ed4d8449"}, - {file = "numpy-1.21.5-cp38-cp38-win_amd64.whl", hash = "sha256:dbce7adeb66b895c6aaa1fad796aaefc299ced597f6fbd9ceddb0dd735245354"}, - {file = "numpy-1.21.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:507c05c7a37b3683eb08a3ff993bd1ee1e6c752f77c2f275260533b265ecdb6c"}, - {file = "numpy-1.21.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:00c9fa73a6989895b8815d98300a20ac993c49ac36c8277e8ffeaa3631c0dbbb"}, - {file = "numpy-1.21.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69a5a8d71c308d7ef33ef72371c2388a90e3495dbb7993430e674006f94797d5"}, - {file = "numpy-1.21.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2d8adfca843bc46ac199a4645233f13abf2011a0b2f4affc5c37cd552626f27b"}, - {file = "numpy-1.21.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c293d3c0321996cd8ffe84215ffe5d269fd9d1d12c6f4ffe2b597a7c30d3e593"}, - {file = "numpy-1.21.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c978544be9e04ed12016dd295a74283773149b48f507d69b36f91aa90a643e5"}, - {file = "numpy-1.21.5-cp39-cp39-win32.whl", hash = "sha256:2a9add27d7fc0fdb572abc3b2486eb3b1395da71e0254c5552b2aad2a18b5441"}, - {file = "numpy-1.21.5-cp39-cp39-win_amd64.whl", hash = "sha256:1964db2d4a00348b7a60ee9d013c8cb0c566644a589eaa80995126eac3b99ced"}, - {file = "numpy-1.21.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a7c4b701ca418cd39e28ec3b496e6388fe06de83f5f0cb74794fa31cfa384c02"}, - {file = "numpy-1.21.5.zip", hash = "sha256:6a5928bc6241264dce5ed509e66f33676fc97f464e7a919edc672fb5532221ee"}, {file = "numpy-1.22.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:515a8b6edbb904594685da6e176ac9fbea8f73a5ebae947281de6613e27f1956"}, {file = "numpy-1.22.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76a4f9bce0278becc2da7da3b8ef854bed41a991f4226911a24a9711baad672c"}, {file = "numpy-1.22.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:168259b1b184aa83a514f307352c25c56af111c269ffc109d9704e81f72e764b"}, @@ -368,6 +495,14 @@ packaging = [ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] +pathspec = [ + {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, + {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, +] +platformdirs = [ + {file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"}, + {file = "platformdirs-2.4.1.tar.gz", hash = "sha256:440633ddfebcc36264232365d7840a970e75e1018d15b4327d11f91909045fda"}, +] pydicom = [ {file = "pydicom-2.2.2-py3-none-any.whl", hash = "sha256:6ecb9c6d56a20b2104099b8ef8fe0f3664d797b08a0e0548fe0311b515b32308"}, {file = "pydicom-2.2.2.tar.gz", hash = "sha256:f51078da1fe1285431a6f14a5ca0273520391ef28a68b32b9de73cc7f93f38be"}, @@ -376,3 +511,41 @@ pyparsing = [ {file = "pyparsing-3.0.7-py3-none-any.whl", hash = 
"sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"}, {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, ] +tomli = [ + {file = "tomli-2.0.0-py3-none-any.whl", hash = "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224"}, + {file = "tomli-2.0.0.tar.gz", hash = "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"}, +] +typed-ast = [ + {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"}, + {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"}, + {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"}, + {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"}, + {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"}, + {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"}, + {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"}, + {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"}, + {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"}, + {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"}, + {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"}, + {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"}, + {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"}, + {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"}, + {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"}, + {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"}, + {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"}, + {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"}, + {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"}, 
+ {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"}, + {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"}, + {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"}, + {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"}, + {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, +] +typing-extensions = [ + {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, + {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, +] +zipp = [ + {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, + {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, +] diff --git a/pyproject.toml b/pyproject.toml index 1b6c2e5..84a758c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ numpy = [ ] [tool.poetry.dev-dependencies] +black = "^22.1.0" [tool.poetry.urls] "Bug Tracker" = "https://github.com/RSIP-Vision/medio/issues" From f5925c6f1cb619b3789b039670357b2dd2ca404e Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sat, 5 Feb 2022 14:25:44 +0200 Subject: [PATCH 2/5] Format the codebase with `black` --- medio/__init__.py | 4 +- medio/backends/itk_io.py | 194 ++++++++++++------ medio/backends/nib_io.py | 47 +++-- medio/backends/pdcm_io.py | 95 ++++++--- medio/backends/pdcm_unpack_ds.py | 19 +- medio/metadata/affine.py | 12 +- medio/metadata/convert_nib_itk.py | 8 +- medio/metadata/dcm_uid.py | 2 +- medio/metadata/itk_orientation.py | 13 +- medio/metadata/metadata.py | 69 ++++--- medio/metadata/pdcm_ds.py | 54 ++++- medio/read_save.py | 71 +++++-- medio/utils/files.py | 42 ++-- medio/utils/two_way_dict.py | 1 + .../itk_dcm_orientations.py | 65 +++--- tests/itk_dcm_orientations/itk_utils.py | 4 +- .../orientations_utils.py | 24 +-- 17 files changed, 491 insertions(+), 233 deletions(-) diff --git a/medio/__init__.py b/medio/__init__.py index 3325fa9..a647ccd 100644 --- a/medio/__init__.py +++ b/medio/__init__.py @@ -3,6 +3,6 @@ from medio.read_save import read_img, save_img, save_dir from medio import backends, metadata, medimg, utils -__version__ = '0.4.1' +__version__ = "0.4.1" -__all__ = ['read_img', 'save_img', 'save_dir', 'MetaData', 'Affine', '__version__'] +__all__ = ["read_img", "save_img", "save_dir", "MetaData", "Affine", "__version__"] diff --git a/medio/backends/itk_io.py b/medio/backends/itk_io.py index 00eaf97..989e39c 100644 --- a/medio/backends/itk_io.py +++ b/medio/backends/itk_io.py @@ -14,16 +14,23 @@ class ItkIO: - coord_sys = 'itk' + coord_sys = "itk" DEFAULT_COMPONENTS_AXIS = 0 # in the transposed image # default image type: dimension = 3 - pixel_type = itk.ctype('short') # signed short - int16 + pixel_type = itk.ctype("short") # signed short - int16 image_type = itk.Image[pixel_type, dimension] @staticmethod - def read_img(input_path, desired_axcodes=None, header=False, components_axis=None, pixel_type=pixel_type, - 
fallback_only=True, series=None): + def read_img( + input_path, + desired_axcodes=None, + header=False, + components_axis=None, + pixel_type=pixel_type, + fallback_only=True, + series=None, + ): """ The main reader function, reads images and performs reorientation and unpacking :param input_path: path of image file or directory containing dicom series @@ -38,7 +45,9 @@ def read_img(input_path, desired_axcodes=None, header=False, components_axis=Non """ input_path = Path(input_path) if input_path.is_dir(): - img = ItkIO.read_dir(str(input_path), pixel_type, fallback_only, series=series, header=header) + img = ItkIO.read_dir( + str(input_path), pixel_type, fallback_only, series=series, header=header + ) elif input_path.is_file(): img = ItkIO.read_img_file(str(input_path), pixel_type, fallback_only) else: @@ -49,26 +58,43 @@ def read_img(input_path, desired_axcodes=None, header=False, components_axis=Non if (desired_axcodes is None) or (desired_axcodes == metadata.ornt): image_np = ItkIO.itk_img_to_array(img) else: - orig_ornt = metadata.ornt # store the original orientation before the reorientation + orig_ornt = ( + metadata.ornt + ) # store the original orientation before the reorientation img, _ = ItkIO.reorient(img, desired_axcodes) image_np, affine = ItkIO.unpack_img(img) - metadata = MetaData(affine=affine, orig_ornt=orig_ornt, coord_sys=ItkIO.coord_sys) + metadata = MetaData( + affine=affine, orig_ornt=orig_ornt, coord_sys=ItkIO.coord_sys + ) if header: # TODO: not implemented for a series (returns an empty dictionary), see ItkIO.read_dir metadict = img.GetMetaDataDictionary() - metadata.header = {key: metadict[key] for key in metadict.GetKeys() if not key.startswith('ITK_')} + metadata.header = { + key: metadict[key] + for key in metadict.GetKeys() + if not key.startswith("ITK_") + } # TODO: consider unifying with PdcmIO.move_channels_axis n_components = img.GetNumberOfComponentsPerPixel() if (n_components > 1) and (components_axis is not None): # assert image_np.shape[ItkIO.DEFAULT_COMPONENTS_AXIS] == n_components - image_np = np.moveaxis(image_np, ItkIO.DEFAULT_COMPONENTS_AXIS, components_axis) + image_np = np.moveaxis( + image_np, ItkIO.DEFAULT_COMPONENTS_AXIS, components_axis + ) return image_np, metadata @staticmethod - def save_img(filename, image_np, metadata, use_original_ornt=True, components_axis=None, - allow_dcm_reorient=False, compression=False): + def save_img( + filename, + image_np, + metadata, + use_original_ornt=True, + components_axis=None, + allow_dcm_reorient=False, + compression=False, + ): """ Save an image file with itk :param filename: the filename to save, str or os.PathLike @@ -82,22 +108,40 @@ def save_img(filename, image_np, metadata, use_original_ornt=True, components_ax """ is_dcm = is_dicom(filename, check_exist=False) if is_dcm: - image_np = ItkIO.prepare_dcm_array(image_np, is_vector=components_axis is not None) - image = ItkIO.prepare_image(image_np, metadata, use_original_ornt, components_axis=components_axis, - is_dcm=is_dcm, allow_dcm_reorient=allow_dcm_reorient) + image_np = ItkIO.prepare_dcm_array( + image_np, is_vector=components_axis is not None + ) + image = ItkIO.prepare_image( + image_np, + metadata, + use_original_ornt, + components_axis=components_axis, + is_dcm=is_dcm, + allow_dcm_reorient=allow_dcm_reorient, + ) ItkIO.save_img_file(image, str(filename), compression=compression) @staticmethod - def prepare_image(image_np, metadata, use_original_ornt, components_axis=None, is_dcm=False, - allow_dcm_reorient=False): + def prepare_image( + 
image_np, + metadata, + use_original_ornt, + components_axis=None, + is_dcm=False, + allow_dcm_reorient=False, + ): """Prepare image for saving""" orig_coord_sys = metadata.coord_sys metadata.convert(ItkIO.coord_sys) desired_ornt = metadata.orig_ornt if use_original_ornt else None if is_dcm: # checking right-handed orientation before saving a dicom file/series - desired_ornt = check_dcm_ornt(desired_ornt, metadata, allow_dcm_reorient=allow_dcm_reorient) - image = ItkIO.pack2img(image_np, metadata.affine, components_axis=components_axis) + desired_ornt = check_dcm_ornt( + desired_ornt, metadata, allow_dcm_reorient=allow_dcm_reorient + ) + image = ItkIO.pack2img( + image_np, metadata.affine, components_axis=components_axis + ) if (desired_ornt is not None) and (desired_ornt != metadata.ornt): image, _ = ItkIO.reorient(image, desired_ornt) metadata.convert(orig_coord_sys) @@ -123,11 +167,13 @@ def prepare_dcm_array(image_np, is_vector=False): if np.array_equal(arr, image_np): return arr - raise NotImplementedError('Saving a single dicom file with ItkIO is currently supported only for \n' - '1. 2d images - int16, uint16, uint8\n' - '2. 3d images with integer nonnegative values - uint8, uint16\n' - '3. 2d/3d RGB[A] images - uint8 (with channels_axis)\n' - 'For negative values, try to save a dicom directory or use PdcmIO.save_arr2dcm_file') + raise NotImplementedError( + "Saving a single dicom file with ItkIO is currently supported only for \n" + "1. 2d images - int16, uint16, uint8\n" + "2. 3d images with integer nonnegative values - uint8, uint16\n" + "3. 2d/3d RGB[A] images - uint8 (with channels_axis)\n" + "For negative values, try to save a dicom directory or use PdcmIO.save_arr2dcm_file" + ) @staticmethod def read_img_file(filename, pixel_type=None, fallback_only=False): @@ -140,7 +186,7 @@ def read_img_file_long(filename, image_type=image_type): reader = itk.ImageFileReader[image_type].New() reader.SetFileName(filename) reader.Update() - image_io = str(reader.GetImageIO()).split(' ')[0] + image_io = str(reader.GetImageIO()).split(" ")[0] image = reader.GetOutput() return image, image_io @@ -162,7 +208,9 @@ def save_img_file_long(image, filename, compression=False): @staticmethod def itk_img_to_array(img_itk): """Swap the axes to the usual x, y, z convention in RAI orientation (originally z, y, x)""" - img_array = itk.array_from_image(img_itk).T # the transpose here is equivalent to keep_axes=True + img_array = itk.array_from_image( + img_itk + ).T # the transpose here is equivalent to keep_axes=True return img_array @staticmethod @@ -170,9 +218,13 @@ def array_to_itk_img(img_array, components_axis=None): """Set components_axis to not None for vector images, e.g. 
RGB""" is_vector = False if components_axis is not None: - img_array = np.moveaxis(img_array, components_axis, ItkIO.DEFAULT_COMPONENTS_AXIS) + img_array = np.moveaxis( + img_array, components_axis, ItkIO.DEFAULT_COMPONENTS_AXIS + ) is_vector = True - img_itk = itk.image_from_array(img_array.T.copy(), is_vector=is_vector) # copy is crucial for the ordering + img_itk = itk.image_from_array( + img_array.T.copy(), is_vector=is_vector + ) # copy is crucial for the ordering return img_itk @staticmethod @@ -183,7 +235,9 @@ def unpack_img(img): @staticmethod def get_img_aff(img): - direction = itk.array_from_vnl_matrix(img.GetDirection().GetVnlMatrix().as_matrix()) + direction = itk.array_from_vnl_matrix( + img.GetDirection().GetVnlMatrix().as_matrix() + ) spacing = itk.array_from_vnl_vector(img.GetSpacing().GetVnlVector()) origin = itk.array_from_vnl_vector(img.GetOrigin().GetVnlVector()) return Affine(direction=direction, spacing=spacing, origin=origin) @@ -204,12 +258,12 @@ def set_img_aff(image, affine): # setting metadata spacing_vec = itk.Vector[itk.D, dimension]() - spacing_vec.SetVnlVector(itk.vnl_vector_from_array(spacing.astype('float'))) + spacing_vec.SetVnlVector(itk.vnl_vector_from_array(spacing.astype("float"))) image.SetSpacing(spacing_vec) - image.SetOrigin(origin.astype('float')) + image.SetOrigin(origin.astype("float")) - direction_mat = itk.vnl_matrix_from_array(direction_arr.astype('float')) + direction_mat = itk.vnl_matrix_from_array(direction_arr.astype("float")) direction = itk.Matrix[itk.D, dimension, dimension](direction_mat) image.SetDirection(direction) @@ -231,7 +285,9 @@ def reorient(img, desired_orientation: Union[int, tuple, str, None]): return reoriented_itk_img, original_orientation_code @staticmethod - def read_dir(dirname, pixel_type=None, fallback_only=False, series=None, header=False): + def read_dir( + dirname, pixel_type=None, fallback_only=False, series=None, header=False + ): """ Read a dicom directory. If there is more than one series in the directory an error is raised (unless the series argument is used properly). @@ -245,7 +301,9 @@ def read_dir(dirname, pixel_type=None, fallback_only=False, series=None, header= # reader.Update() # metadict_arr = reader.GetMetaDataDictionaryArray() # (See also itk.imread source code) - raise NotImplementedError("header=True is currently not supported for a series") + raise NotImplementedError( + "header=True is currently not supported for a series" + ) return itk.imread(filenames, pixel_type, fallback_only) @staticmethod @@ -264,8 +322,17 @@ def extract_series(dirname, series=None): return filenames @staticmethod - def save_dcm_dir(dirname, image_np, metadata, use_original_ornt=True, components_axis=None, parents=False, - exist_ok=False, allow_dcm_reorient=False, **kwargs): + def save_dcm_dir( + dirname, + image_np, + metadata, + use_original_ornt=True, + components_axis=None, + parents=False, + exist_ok=False, + allow_dcm_reorient=False, + **kwargs, + ): """ Save a 3d numpy array image_np as a dicom series of 2d dicom slices in the directory dirname :param dirname: the directory to save in the files, str or pathlib.Path. 
If it exists - must be empty @@ -279,8 +346,14 @@ def save_dcm_dir(dirname, image_np, metadata, use_original_ornt=True, components :param allow_dcm_reorient: whether to allow automatic reorientation to a right-handed orientation or not :param kwargs: optional kwargs passed to ItkIO.dcm_metadata: pattern, metadata_dict """ - image = ItkIO.prepare_image(image_np, metadata, use_original_ornt, components_axis=components_axis, - is_dcm=True, allow_dcm_reorient=allow_dcm_reorient) + image = ItkIO.prepare_image( + image_np, + metadata, + use_original_ornt, + components_axis=components_axis, + is_dcm=True, + allow_dcm_reorient=allow_dcm_reorient, + ) image_type = type(image) _, (pixel_type, _) = itk.template(image) image2d_type = itk.Image[pixel_type, 2] @@ -298,7 +371,7 @@ def save_dcm_dir(dirname, image_np, metadata, use_original_ornt=True, components writer.Update() @staticmethod - def dcm_series_metadata(image, dirname, pattern='IM{}.dcm', metadata_dict=None): + def dcm_series_metadata(image, dirname, pattern="IM{}.dcm", metadata_dict=None): """ Return dicom series metadata per slice and filenames :param image: the full itk image to be saved as dicom series @@ -315,40 +388,45 @@ def dcm_series_metadata(image, dirname, pattern='IM{}.dcm', metadata_dict=None): mdict = itk.MetaDataDictionary() # Series Instance UID - mdict['0020|000e'] = generate_uid() + mdict["0020|000e"] = generate_uid() # Study Instance UID - mdict['0020|000d'] = generate_uid() + mdict["0020|000d"] = generate_uid() - date, time = datetime.now().strftime('%Y%m%d %H%M%S.%f').split() + date, time = datetime.now().strftime("%Y%m%d %H%M%S.%f").split() # Study Date - mdict['0008|0020'] = date + mdict["0008|0020"] = date # Series Date - mdict['0008|0021'] = date + mdict["0008|0021"] = date # Content Date - mdict['0008|0023'] = date + mdict["0008|0023"] = date # Study Time - mdict['0008|0030'] = time + mdict["0008|0030"] = time # Series Time - mdict['0008|0031'] = time + mdict["0008|0031"] = time # Pixel Spacing (not necessary - automatically saved) spacing = image.GetSpacing() - mdict['0028|0030'] = f'{spacing[0]}\\{spacing[1]}' + mdict["0028|0030"] = f"{spacing[0]}\\{spacing[1]}" # Spacing Between Slices - mdict['0018|0088'] = str(spacing[2]) + mdict["0018|0088"] = str(spacing[2]) # Image Orientation (Patient) - orientation_str = '\\'.join([str(image.GetDirection().GetVnlMatrix().get(i, j)) - for j in range(2) for i in range(3)]) - mdict['0020|0037'] = orientation_str + orientation_str = "\\".join( + [ + str(image.GetDirection().GetVnlMatrix().get(i, j)) + for j in range(2) + for i in range(3) + ] + ) + mdict["0020|0037"] = orientation_str # Patient Position - mdict['0018|5100'] = '' + mdict["0018|5100"] = "" # Number of Frames - mdict['0028|0008'] = '1' + mdict["0028|0008"] = "1" # Number of Slices - mdict['0054|0081'] = str(n) + mdict["0054|0081"] = str(n) # Modality - mdict['0008|0060'] = 'CT' + mdict["0008|0060"] = "CT" if metadata_dict is not None: for key, val in metadata_dict.items(): @@ -362,11 +440,11 @@ def dcm_series_metadata(image, dirname, pattern='IM{}.dcm', metadata_dict=None): # copy the shared properties dict: mdict_i = itk.MetaDataDictionary(mdict) # Instance Number - mdict_i['0020|0013'] = str(i + 1) + mdict_i["0020|0013"] = str(i + 1) # Image Position (Patient) position = image.TransformIndexToPhysicalPoint([0, 0, i]) - position_str = '\\'.join([str(position[i]) for i in range(3)]) - mdict_i['0020|0032'] = position_str + position_str = "\\".join([str(position[i]) for i in range(3)]) + mdict_i["0020|0032"] = 
position_str mdict_list += [mdict_i] filenames += [str(Path(dirname) / pattern.format(i + 1))] diff --git a/medio/backends/nib_io.py b/medio/backends/nib_io.py index 4bd16e0..bd3c145 100644 --- a/medio/backends/nib_io.py +++ b/medio/backends/nib_io.py @@ -8,14 +8,11 @@ class NibIO: - coord_sys = 'nib' - RGB_DTYPE = np.dtype([('R', np.uint8), - ('G', np.uint8), - ('B', np.uint8)]) - RGBA_DTYPE = np.dtype([('R', np.uint8), - ('G', np.uint8), - ('B', np.uint8), - ('A', np.uint8)]) + coord_sys = "nib" + RGB_DTYPE = np.dtype([("R", np.uint8), ("G", np.uint8), ("B", np.uint8)]) + RGBA_DTYPE = np.dtype( + [("R", np.uint8), ("G", np.uint8), ("B", np.uint8), ("A", np.uint8)] + ) @staticmethod def read_img(input_path, desired_axcodes=None, header=False, channels_axis=None): @@ -28,16 +25,20 @@ def read_img(input_path, desired_axcodes=None, header=False, channels_axis=None) :return: image array and corresponding metadata """ img_struct = nib.load(input_path) - orig_ornt_str = ''.join(nib.aff2axcodes(img_struct.affine)) + orig_ornt_str = "".join(nib.aff2axcodes(img_struct.affine)) if desired_axcodes is not None: img_struct = NibIO.reorient(img_struct, desired_axcodes) img = np.asanyarray(img_struct.dataobj) if channels_axis is not None: img = NibIO.unravel_array(img, channels_axis) affine = Affine(img_struct.affine) - metadata = MetaData(affine=affine, orig_ornt=orig_ornt_str, coord_sys=NibIO.coord_sys) + metadata = MetaData( + affine=affine, orig_ornt=orig_ornt_str, coord_sys=NibIO.coord_sys + ) if header: - metadata.header = {key: img_struct.header[key] for key in img_struct.header.keys()} + metadata.header = { + key: img_struct.header[key] for key in img_struct.header.keys() + } return img, metadata @staticmethod @@ -79,7 +80,7 @@ def unravel_array(array, channels_axis=-1): np.dtype([('R', 'uint8'), ('G', 'uint8'), ('B', 'uint8')]) Convert it into an array with dtype 'uint8' and 3 channels for RGB in an additional last dimension""" dtype = array.dtype - if not (hasattr(dtype, '__len__') and len(dtype) > 1): + if not (hasattr(dtype, "__len__") and len(dtype) > 1): return array return np.stack([array[field] for field in dtype.fields], axis=channels_axis) @@ -87,22 +88,26 @@ def unravel_array(array, channels_axis=-1): def pack_channeled_img(img, channels_axis): dtype = img.dtype if not np.issubdtype(dtype, np.uint8): - raise ValueError(f'RGB or RGBA images must have dtype "np.uint8", got: "{dtype}"') + raise ValueError( + f'RGB or RGBA images must have dtype "np.uint8", got: "{dtype}"' + ) n_channels = img.shape[channels_axis] img = np.moveaxis(img, channels_axis, -1) r_channel = img[..., 0] if n_channels == 3: img_rgb = np.empty_like(r_channel, dtype=NibIO.RGB_DTYPE) - img_rgb['R'] = r_channel - img_rgb['G'] = img[..., 1] - img_rgb['B'] = img[..., 2] + img_rgb["R"] = r_channel + img_rgb["G"] = img[..., 1] + img_rgb["B"] = img[..., 2] return img_rgb elif n_channels == 4: img_rgba = np.empty_like(r_channel, dtype=NibIO.RGBA_DTYPE) - img_rgba['R'] = r_channel - img_rgba['G'] = img[..., 1] - img_rgba['B'] = img[..., 2] - img_rgba['A'] = img[..., 3] + img_rgba["R"] = r_channel + img_rgba["G"] = img[..., 1] + img_rgba["B"] = img[..., 2] + img_rgba["A"] = img[..., 3] return img_rgba else: - raise ValueError(f'Invalid number of channels: {n_channels}, should be 3 (RGB) or 4 (RGBA)') + raise ValueError( + f"Invalid number of channels: {n_channels}, should be 3 (RGB) or 4 (RGBA)" + ) diff --git a/medio/backends/pdcm_io.py b/medio/backends/pdcm_io.py index 0a16150..00b204d 100644 --- a/medio/backends/pdcm_io.py 
+++ b/medio/backends/pdcm_io.py @@ -14,15 +14,22 @@ class PdcmIO: - coord_sys = 'itk' + coord_sys = "itk" # channels axes in the transposed image for pydicom and dicom-numpy. The actual axis is the first or the second # value of the tuple, according to the planar configuration (which is either 0 or 1) DEFAULT_CHANNELS_AXES_PYDICOM = (0, -1) DEFAULT_CHANNELS_AXES_DICOM_NUMPY = (0, 2) @staticmethod - def read_img(input_path, desired_ornt=None, header=False, channels_axis=None, globber='*', - allow_default_affine=False, series=None): + def read_img( + input_path, + desired_ornt=None, + header=False, + channels_axis=None, + globber="*", + allow_default_affine=False, + series=None, + ): """ Read a dicom file or folder (series) and return the numpy array and the corresponding metadata :param input_path: path-like object (str or pathlib.Path) of the file or directory to read @@ -37,13 +44,24 @@ def read_img(input_path, desired_ornt=None, header=False, channels_axis=None, gl :return: numpy array and metadata """ input_path = Path(input_path) - temp_channels_axis = -1 # if there are channels, they must be in the last axis for the reorientation + temp_channels_axis = ( + -1 + ) # if there are channels, they must be in the last axis for the reorientation if input_path.is_dir(): - img, metadata, channeled = PdcmIO.read_dcm_dir(input_path, header, globber, - channels_axis=temp_channels_axis, series=series) + img, metadata, channeled = PdcmIO.read_dcm_dir( + input_path, + header, + globber, + channels_axis=temp_channels_axis, + series=series, + ) else: img, metadata, channeled = PdcmIO.read_dcm_file( - input_path, header, allow_default_affine=allow_default_affine, channels_axis=temp_channels_axis) + input_path, + header, + allow_default_affine=allow_default_affine, + channels_axis=temp_channels_axis, + ) img, metadata = PdcmIO.reorient(img, metadata, desired_ornt) # move the channels after the reorientation if channeled and channels_axis != temp_channels_axis: @@ -51,7 +69,9 @@ def read_img(input_path, desired_ornt=None, header=False, channels_axis=None, gl return img, metadata @staticmethod - def read_dcm_file(filename, header=False, allow_default_affine=False, channels_axis=None): + def read_dcm_file( + filename, header=False, allow_default_affine=False, channels_axis=None + ): """ Read a single dicom file. Return the image array, metadata, and whether it has channels @@ -66,13 +86,19 @@ def read_dcm_file(filename, header=False, allow_default_affine=False, channels_a if header: metadata.header = {str(key): ds[key] for key in ds.keys()} samples_per_pixel = ds.SamplesPerPixel - img = PdcmIO.move_channels_axis(img, samples_per_pixel=samples_per_pixel, channels_axis=channels_axis, - planar_configuration=ds.get('PlanarConfiguration', None), - default_axes=PdcmIO.DEFAULT_CHANNELS_AXES_PYDICOM) + img = PdcmIO.move_channels_axis( + img, + samples_per_pixel=samples_per_pixel, + channels_axis=channels_axis, + planar_configuration=ds.get("PlanarConfiguration", None), + default_axes=PdcmIO.DEFAULT_CHANNELS_AXES_PYDICOM, + ) return img, metadata, samples_per_pixel > 1 @staticmethod - def read_dcm_dir(input_dir, header=False, globber='*', channels_axis=None, series=None): + def read_dcm_dir( + input_dir, header=False, globber="*", channels_axis=None, series=None + ): """ Reads a 3D dicom image: input path can be a file or directory (DICOM series). 
Return the image array, metadata, and whether it has channels @@ -84,15 +110,21 @@ def read_dcm_dir(input_dir, header=False, globber='*', channels_axis=None, serie if header: # TODO: add header support, something like # metdata.header = [{str(key): ds[key] for key in ds.keys()} for ds in slices] - raise NotImplementedError("header=True is currently not supported for a series") + raise NotImplementedError( + "header=True is currently not supported for a series" + ) samples_per_pixel = slices[0].SamplesPerPixel - img = PdcmIO.move_channels_axis(img, samples_per_pixel=samples_per_pixel, channels_axis=channels_axis, - planar_configuration=slices[0].get('PlanarConfiguration', None), - default_axes=PdcmIO.DEFAULT_CHANNELS_AXES_DICOM_NUMPY) + img = PdcmIO.move_channels_axis( + img, + samples_per_pixel=samples_per_pixel, + channels_axis=channels_axis, + planar_configuration=slices[0].get("PlanarConfiguration", None), + default_axes=PdcmIO.DEFAULT_CHANNELS_AXES_DICOM_NUMPY, + ) return img, metadata, samples_per_pixel > 1 @staticmethod - def extract_slices(input_dir, globber='*', series=None): + def extract_slices(input_dir, globber="*", series=None): """Extract slices from input_dir and return them sorted""" files = Path(input_dir).glob(globber) slices = [pydicom.dcmread(filename) for filename in files] @@ -106,7 +138,7 @@ def extract_slices(input_dir, globber='*', series=None): series_uid = parse_series_uids(input_dir, datasets.keys(), series, globber) slices = datasets[series_uid] - slices.sort(key=lambda ds: ds.get('InstanceNumber', 0)) + slices.sort(key=lambda ds: ds.get("InstanceNumber", 0)) return slices @staticmethod @@ -114,8 +146,13 @@ def aff2meta(affine): return MetaData(affine, coord_sys=PdcmIO.coord_sys) @staticmethod - def move_channels_axis(array, samples_per_pixel, channels_axis=None, planar_configuration=None, - default_axes=DEFAULT_CHANNELS_AXES_PYDICOM): + def move_channels_axis( + array, + samples_per_pixel, + channels_axis=None, + planar_configuration=None, + default_axes=DEFAULT_CHANNELS_AXES_PYDICOM, + ): """Move the channels axis from the original axis to the destined channels_axis""" if (samples_per_pixel == 1) or (channels_axis is None): # no rearrangement is needed @@ -123,7 +160,9 @@ def move_channels_axis(array, samples_per_pixel, channels_axis=None, planar_conf # extract the original channels axis if planar_configuration not in [0, 1]: - raise ValueError(f'Invalid Planar Configuration value: {planar_configuration}') + raise ValueError( + f"Invalid Planar Configuration value: {planar_configuration}" + ) orig_axis = default_axes[planar_configuration] flag = True # original channels axis is assigned @@ -138,7 +177,7 @@ def move_channels_axis(array, samples_per_pixel, channels_axis=None, planar_conf break if not flag: - raise ValueError('The original channels axis was not detected') + raise ValueError("The original channels axis was not detected") return np.moveaxis(array, orig_axis, channels_axis) @@ -160,14 +199,20 @@ def reorient(img, metadata, desired_ornt): reoriented_img_struct = NibIO.reorient(img_struct, desired_ornt) img = np.asanyarray(reoriented_img_struct.dataobj) - metadata = MetaData(reoriented_img_struct.affine, orig_ornt=orig_ornt, coord_sys=NibIO.coord_sys, - header=metadata.header) + metadata = MetaData( + reoriented_img_struct.affine, + orig_ornt=orig_ornt, + coord_sys=NibIO.coord_sys, + header=metadata.header, + ) # convert back to pydicom convention metadata.convert(PdcmIO.coord_sys) return img, metadata @staticmethod - def 
save_arr2dcm_file(output_filename, template_filename, img_arr, dtype=None, keep_rescale=False): + def save_arr2dcm_file( + output_filename, template_filename, img_arr, dtype=None, keep_rescale=False + ): """ Writes a dicom single file image using template file, without the intensity transformation from template dataset unless keep_rescale is True diff --git a/medio/backends/pdcm_unpack_ds.py b/medio/backends/pdcm_unpack_ds.py index 0454533..e938fd4 100644 --- a/medio/backends/pdcm_unpack_ds.py +++ b/medio/backends/pdcm_unpack_ds.py @@ -4,7 +4,11 @@ import logging import numpy as np -from dicom_numpy.combine_slices import _validate_image_orientation, _extract_cosines, _requires_rescaling +from dicom_numpy.combine_slices import ( + _validate_image_orientation, + _extract_cosines, + _requires_rescaling, +) logger = logging.getLogger(__name__) @@ -81,9 +85,11 @@ def _unpack_pixel_array(dataset, rescale=None): rescale = _requires_rescaling(dataset) if rescale: - voxels = voxels.astype('int16', copy=False) # TODO: it takes time! Consider view. - slope = getattr(dataset, 'RescaleSlope', 1) - intercept = getattr(dataset, 'RescaleIntercept', 0) + voxels = voxels.astype( + "int16", copy=False + ) # TODO: it takes time! Consider view. + slope = getattr(dataset, "RescaleSlope", 1) + intercept = getattr(dataset, "RescaleIntercept", 0) if int(slope) == slope and int(intercept) == intercept: slope = int(slope) intercept = int(intercept) @@ -103,8 +109,9 @@ def _ijk_to_patient_xyz_transform_matrix(dataset): transform[:3, 0] = row_cosine * column_spacing transform[:3, 1] = column_cosine * row_spacing - transform[:3, 2] = (np.array(dataset.slice_position(-1)) - dataset.slice_position(0) - ) / (dataset.NumberOfFrames - 1) + transform[:3, 2] = ( + np.array(dataset.slice_position(-1)) - dataset.slice_position(0) + ) / (dataset.NumberOfFrames - 1) # transform[:3, 2] = slice_cosine * slice_spacing transform[:3, 3] = dataset.ImagePositionPatient diff --git a/medio/metadata/affine.py b/medio/metadata/affine.py index 5dd92c6..a450f4d 100644 --- a/medio/metadata/affine.py +++ b/medio/metadata/affine.py @@ -33,7 +33,7 @@ def __new__(cls, affine=None, *, direction=None, spacing=None, origin=None): affine = cls.construct_affine(direction, spacing, origin) obj = np.asarray(affine).view(cls) # return array view of type Affine return obj - + def __init__(self, affine=None, *, direction=None, spacing=None, origin=None): self.dim = self.shape[0] - 1 if affine is None: @@ -74,7 +74,9 @@ def spacing(self): def spacing(self, value): value = np.asarray(value) self._m_matrix = self._m_matrix @ np.diag(value / self._spacing) - self._spacing = np.abs(value) # the spacing must be positive (or at least nonnegative) + self._spacing = np.abs( + value + ) # the spacing must be positive (or at least nonnegative) @property def direction(self): @@ -124,4 +126,8 @@ def affine2direction(affine, spacing=None): def affine2comps(affine, spacing=None): if spacing is None: spacing = Affine.affine2spacing(affine) - return Affine.affine2direction(affine, spacing), spacing, Affine.affine2origin(affine) + return ( + Affine.affine2direction(affine, spacing), + spacing, + Affine.affine2origin(affine), + ) diff --git a/medio/metadata/convert_nib_itk.py b/medio/metadata/convert_nib_itk.py index 25cce92..00233c0 100644 --- a/medio/metadata/convert_nib_itk.py +++ b/medio/metadata/convert_nib_itk.py @@ -35,16 +35,16 @@ # store compactly axis directions codes axes_inv = TwoWayDict() -axes_inv['R'] = 'L' -axes_inv['A'] = 'P' -axes_inv['S'] = 'I' 
+axes_inv["R"] = "L" +axes_inv["A"] = "P" +axes_inv["S"] = "I" def inv_axcodes(axcodes): """Inverse axes codes chars, for example: SPL -> IAR""" if axcodes is None: return None - new_axcodes = '' + new_axcodes = "" for code in axcodes: new_axcodes += axes_inv[code] return new_axcodes diff --git a/medio/metadata/dcm_uid.py b/medio/metadata/dcm_uid.py index 2e6cc79..7259962 100644 --- a/medio/metadata/dcm_uid.py +++ b/medio/metadata/dcm_uid.py @@ -3,7 +3,7 @@ from pydicom.uid import generate_uid # Given by Medical Connections (http://www.medicalconnections.co.uk/FreeUID.html) -MEDIO_ROOT_UID = '1.2.826.0.1.3680043.10.513.' +MEDIO_ROOT_UID = "1.2.826.0.1.3680043.10.513." generate_uid = partial(generate_uid, prefix=MEDIO_ROOT_UID) diff --git a/medio/metadata/itk_orientation.py b/medio/metadata/itk_orientation.py index be5efdb..47cea82 100644 --- a/medio/metadata/itk_orientation.py +++ b/medio/metadata/itk_orientation.py @@ -32,22 +32,25 @@ class ItkOrientationCode: def itk_orientation_code(ax_code): """ax_code is string or tuple of valid orientation, e.g. 'LPI', ('A', 'R', 'S')""" prime, second, tertiary = [getattr(AxCodes, axis) for axis in ax_code] - return (prime << AxMajorness.Primary) + (second << AxMajorness.Secondary) + (tertiary << AxMajorness.Tertiary) + return ( + (prime << AxMajorness.Primary) + + (second << AxMajorness.Secondary) + + (tertiary << AxMajorness.Tertiary) + ) # adding all 48 possible orientation codes to ItkOrientationCode class ax_codes_iter = itertools.chain( - *map(itertools.permutations, - itertools.product(('R', 'L'), ('A', 'P'), ('I', 'S')) - )) + *map(itertools.permutations, itertools.product(("R", "L"), ("A", "P"), ("I", "S"))) +) # two way dictionary that translates itk numerical orientation codes to orientation strings and vice versa codes_str_dict = TwoWayDict() codes_str_dict[None] = None for ax_code in ax_codes_iter: - ax_code_str = ''.join(ax_code) + ax_code_str = "".join(ax_code) code = itk_orientation_code(ax_code) setattr(ItkOrientationCode, ax_code_str, code) codes_str_dict[ax_code_str] = code diff --git a/medio/metadata/metadata.py b/medio/metadata/metadata.py index 3682804..9a4a360 100644 --- a/medio/metadata/metadata.py +++ b/medio/metadata/metadata.py @@ -9,7 +9,7 @@ class MetaData: - def __init__(self, affine, orig_ornt=None, coord_sys='itk', header=None): + def __init__(self, affine, orig_ornt=None, coord_sys="itk", header=None): """ Initialize medical image's metadata :param affine: affine matrix of class Affine, numpy float array of shape (4, 4) @@ -29,19 +29,21 @@ def __init__(self, affine, orig_ornt=None, coord_sys='itk', header=None): @staticmethod def check_valid_coord_sys(coord_sys): - if coord_sys not in ('itk', 'nib'): + if coord_sys not in ("itk", "nib"): raise ValueError('Metadata coord_sys must be "itk" or "nib"') def __repr__(self): - sep = ' ' if self.header is None else '\n' - return (f'Affine:\n' - f'{self.affine}\n' - f'Spacing: {self.spacing}\n' - f'Coordinate system: {self.coord_sys}\n' - f'Orientation: {self.ornt}\n' - f'Original orientation: {self.orig_ornt}\n' - f'Header:{sep}' - f'{pprint.pformat(self.header, indent=4)}') + sep = " " if self.header is None else "\n" + return ( + f"Affine:\n" + f"{self.affine}\n" + f"Spacing: {self.spacing}\n" + f"Coordinate system: {self.coord_sys}\n" + f"Orientation: {self.ornt}\n" + f"Original orientation: {self.orig_ornt}\n" + f"Header:{sep}" + f"{pprint.pformat(self.header, indent=4)}" + ) def convert(self, dest_coord_sys): """ @@ -50,22 +52,28 @@ def convert(self, dest_coord_sys): """ 
self.check_valid_coord_sys(dest_coord_sys) if dest_coord_sys != self.coord_sys: - self.affine, self._ornt, self.orig_ornt = convert_nib_itk(self.affine, self._ornt, self.orig_ornt) + self.affine, self._ornt, self.orig_ornt = convert_nib_itk( + self.affine, self._ornt, self.orig_ornt + ) self.coord_sys = dest_coord_sys def clone(self): - return MetaData(affine=self.affine.clone(), orig_ornt=self.orig_ornt, coord_sys=self.coord_sys, - header=deepcopy(self.header)) + return MetaData( + affine=self.affine.clone(), + orig_ornt=self.orig_ornt, + coord_sys=self.coord_sys, + header=deepcopy(self.header), + ) def get_ornt(self): """Returns current orientation based on the affine and coordinate system""" - if self.coord_sys == 'nib': + if self.coord_sys == "nib": ornt_tup = aff2axcodes(self.affine) - elif self.coord_sys == 'itk': + elif self.coord_sys == "itk": ornt_tup = inv_axcodes(aff2axcodes(convert_affine(self.affine))) else: raise ValueError(f'Invalid coord_sys: "{self.coord_sys}"') - ornt_str = ''.join(ornt_tup) + ornt_str = "".join(ornt_tup) return ornt_str @property @@ -87,7 +95,7 @@ def is_right_handed_ornt(self): +1 (-1) indicates right (left) handed orientation. To be used primarily before saving a dicom file or series""" if self.affine.dim != 3: - raise ValueError('Right handed orientation is relevant only to a 3d space') + raise ValueError("Right handed orientation is relevant only to a 3d space") return np.linalg.det(self.affine.direction) > 0 @@ -96,13 +104,14 @@ def is_right_handed_axcodes(axcodes): return True if len(axcodes) != 3: raise ValueError(f'Invalid axcodes (not length 3 or 2): "{axcodes}"') - letter_vec_dict = {'R': [1, 0, 0], - 'L': [-1, 0, 0], - 'A': [0, 1, 0], - 'P': [0, -1, 0], - 'I': [0, 0, 1], - 'S': [0, 0, -1] - } + letter_vec_dict = { + "R": [1, 0, 0], + "L": [-1, 0, 0], + "A": [0, 1, 0], + "P": [0, -1, 0], + "I": [0, 0, 1], + "S": [0, 0, -1], + } u, v, n = [letter_vec_dict[letter] for letter in axcodes] ornt_sign = np.dot(np.cross(u, v), n) if ornt_sign not in (-1, 1): @@ -131,7 +140,9 @@ def check_dcm_ornt(desired_ornt, metadata, allow_dcm_reorient=False): if allow_dcm_reorient: return right_handed_ornt else: - raise ValueError(f'The desired orientation "{desired_ornt}" is left handed, whereas saving dicom is ' - f'possible only with a right handed orientation. \nYou can either pass the saver ' - f'parameter allow_dcm_reorient=True to allow automatic reorientation (in this case to ' - f'"{right_handed_ornt}"), or \nreorient yourself before saving the image as a dicom.') + raise ValueError( + f'The desired orientation "{desired_ornt}" is left handed, whereas saving dicom is ' + f"possible only with a right handed orientation. \nYou can either pass the saver " + f"parameter allow_dcm_reorient=True to allow automatic reorientation (in this case to " + f'"{right_handed_ornt}"), or \nreorient yourself before saving the image as a dicom.' 
+ ) diff --git a/medio/metadata/pdcm_ds.py b/medio/metadata/pdcm_ds.py index 7f581c9..c027062 100644 --- a/medio/metadata/pdcm_ds.py +++ b/medio/metadata/pdcm_ds.py @@ -11,49 +11,83 @@ def convert_ds(dataset): >>> ds = convert_ds(ds) >>> print(ds.PixelSpacing) """ - if dataset.get('NumberOfFrames', 1) > 1: + if dataset.get("NumberOfFrames", 1) > 1: dataset.__class__ = MultiFrameFileDataset return dataset class MultiFrameFileDataset(FileDataset): """This class enables shorter access to basic properties of pydicom dataset of a certain type""" + @property def ImageOrientationPatient(self): - return self.SharedFunctionalGroupsSequence[0].PlaneOrientationSequence[0].ImageOrientationPatient + return ( + self.SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) @property def PixelSpacing(self): - return self.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].PixelSpacing + return ( + self.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].PixelSpacing + ) @property def SpacingBetweenSlices(self): - return self.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].SpacingBetweenSlices + return ( + self.SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) @property def SliceThickness(self): - return self.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].SliceThickness + return ( + self.SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SliceThickness + ) @property def RescaleIntercept(self): - return self.SharedFunctionalGroupsSequence[0].PixelValueTransformationSequence[0].RescaleIntercept + return ( + self.SharedFunctionalGroupsSequence[0] + .PixelValueTransformationSequence[0] + .RescaleIntercept + ) @property def RescaleSlope(self): - return self.SharedFunctionalGroupsSequence[0].PixelValueTransformationSequence[0].RescaleSlope + return ( + self.SharedFunctionalGroupsSequence[0] + .PixelValueTransformationSequence[0] + .RescaleSlope + ) @property def ImagePositionPatient(self): """Note: this property returns only the position of the first slice""" - return self.PerFrameFunctionalGroupsSequence[0].PlanePositionSequence[0].ImagePositionPatient + return ( + self.PerFrameFunctionalGroupsSequence[0] + .PlanePositionSequence[0] + .ImagePositionPatient + ) def slice_positions(self): """Return a list of the slices' position""" - return [seq.PlanePositionSequence[0].ImagePositionPatient for seq in self.PerFrameFunctionalGroupsSequence] + return [ + seq.PlanePositionSequence[0].ImagePositionPatient + for seq in self.PerFrameFunctionalGroupsSequence + ] def slice_position(self, index): """Return the slice position according to the slice index""" - return self.PerFrameFunctionalGroupsSequence[index].PlanePositionSequence[0].ImagePositionPatient + return ( + self.PerFrameFunctionalGroupsSequence[index] + .PlanePositionSequence[0] + .ImagePositionPatient + ) def del_intensity_trans(self): """Delete the pixel value transformation sequence from dataset""" diff --git a/medio/read_save.py b/medio/read_save.py index 46be03b..6f871f0 100644 --- a/medio/read_save.py +++ b/medio/read_save.py @@ -7,8 +7,16 @@ from medio.utils.files import is_nifti -def read_img(input_path, desired_ornt=None, backend=None, dtype=None, header=False, channels_axis=-1, - coord_sys='itk', **kwargs): +def read_img( + input_path, + desired_ornt=None, + backend=None, + dtype=None, + header=False, + channels_axis=-1, + coord_sys="itk", + **kwargs +): """ Read medical image with nibabel or itk :param input_path: str or 
os.PathLike, the input path of image file or a directory containing dicom series @@ -34,19 +42,23 @@ def read_img(input_path, desired_ornt=None, backend=None, dtype=None, header=Fal else: reader, reader_sys = itk_reader_data else: - if backend == 'nib': + if backend == "nib": reader, reader_sys = nib_reader_data - elif backend == 'itk': + elif backend == "itk": reader, reader_sys = itk_reader_data - elif backend in ('pdcm', 'pydicom'): + elif backend in ("pdcm", "pydicom"): reader, reader_sys = pdcm_reader_data else: - raise ValueError('The backend argument must be one of: "itk", "nib", "pdcm" (or "pydicom"), None') + raise ValueError( + 'The backend argument must be one of: "itk", "nib", "pdcm" (or "pydicom"), None' + ) if (coord_sys is not None) and (coord_sys != reader_sys): desired_ornt = inv_axcodes(desired_ornt) - np_image, metadata = reader(input_path, desired_ornt, header, channels_axis, **kwargs) + np_image, metadata = reader( + input_path, desired_ornt, header, channels_axis, **kwargs + ) if dtype is not None: np_image = np_image.astype(dtype, copy=False) @@ -55,8 +67,18 @@ def read_img(input_path, desired_ornt=None, backend=None, dtype=None, header=Fal return np_image, metadata -def save_img(filename, np_image, metadata, use_original_ornt=True, backend=None, dtype=None, channels_axis=None, - mkdir=False, parents=False, **kwargs): +def save_img( + filename, + np_image, + metadata, + use_original_ornt=True, + backend=None, + dtype=None, + channels_axis=None, + mkdir=False, + parents=False, + **kwargs +): """ Save numpy image with corresponding metadata to file :param filename: str or os.PathLike, the output filename @@ -77,9 +99,9 @@ def save_img(filename, np_image, metadata, use_original_ornt=True, backend=None, else: writer = itk_writer else: - if backend == 'nib': + if backend == "nib": writer = nib_writer - elif backend == 'itk': + elif backend == "itk": writer = itk_writer else: raise ValueError('The backend argument must be one of: "itk", "nib", None') @@ -90,13 +112,32 @@ def save_img(filename, np_image, metadata, use_original_ornt=True, backend=None, writer(filename, np_image, metadata, use_original_ornt, channels_axis, **kwargs) -def save_dir(dirname, np_image, metadata, use_original_ornt=True, dtype=None, channels_axis=None, parents=False, - exist_ok=False, allow_dcm_reorient=False, **kwargs): +def save_dir( + dirname, + np_image, + metadata, + use_original_ornt=True, + dtype=None, + channels_axis=None, + parents=False, + exist_ok=False, + allow_dcm_reorient=False, + **kwargs +): """ Save image as a dicom directory. See medio.backends.itk_io.ItkIO.save_dcm_dir documentation. 
dtype is equivalent to passing image_np.astype(dtype) if dtype is not None """ if dtype is not None: np_image = np_image.astype(dtype, copy=False) - ItkIO.save_dcm_dir(dirname, np_image, metadata, use_original_ornt, channels_axis, parents, exist_ok, - allow_dcm_reorient, **kwargs) + ItkIO.save_dcm_dir( + dirname, + np_image, + metadata, + use_original_ornt, + channels_axis, + parents, + exist_ok, + allow_dcm_reorient, + **kwargs + ) diff --git a/medio/utils/files.py b/medio/utils/files.py index d293b5f..2fd0879 100644 --- a/medio/utils/files.py +++ b/medio/utils/files.py @@ -16,11 +16,17 @@ def is_file_suffix(filename, suffixes, check_exist=True): def is_nifti(filename, check_exist=True): - return is_file_suffix(filename, ('.nii.gz', '.nii', '.img.gz', '.img', '.hdr'), check_exist=check_exist) + return is_file_suffix( + filename, + (".nii.gz", ".nii", ".img.gz", ".img", ".hdr"), + check_exist=check_exist, + ) def is_dicom(filename, check_exist=True): - return is_file_suffix(filename, ('.dcm', '.dicom', '.DCM', '.DICOM'), check_exist=check_exist) + return is_file_suffix( + filename, (".dcm", ".dicom", ".DCM", ".DICOM"), check_exist=check_exist + ) def make_empty_dir(dir_path, parents=False): @@ -31,7 +37,7 @@ def make_empty_dir(dir_path, parents=False): except FileExistsError: # the directory exists try: - next(dir_path.glob('*')) + next(dir_path.glob("*")) except StopIteration: pass # the directory exists but empty - ok else: @@ -51,8 +57,10 @@ def parse_series_uids(input_dir, series_uids, series=None, globber=None): keys = sorted(series_uids) num_series = len(keys) if num_series == 0: - raise FileNotFoundError(f'No DICOMs in:\n"{input_dir}"' + ( - f'\nwith globber="{globber}"' if globber is not None else '')) + raise FileNotFoundError( + f'No DICOMs in:\n"{input_dir}"' + + (f'\nwith globber="{globber}"' if globber is not None else "") + ) if num_series == 1: return keys[0] @@ -60,18 +68,22 @@ def parse_series_uids(input_dir, series_uids, series=None, globber=None): # if there is more than a single series if num_series > 1: if series is None: - raise ValueError(f'The directory: "{input_dir}"\n' - 'contains more than a single DICOM series. ' - 'The following series were identified according to their Series Instance UID:\n' - f'{pprint.pformat(keys)}\n' - 'Try passing: series=series_uid, where series_uid is a one of the strings above,\n' - f'or an integer between 0 and {num_series - 1} corresponding to one of them.') + raise ValueError( + f'The directory: "{input_dir}"\n' + "contains more than a single DICOM series. " + "The following series were identified according to their Series Instance UID:\n" + f"{pprint.pformat(keys)}\n" + "Try passing: series=series_uid, where series_uid is a one of the strings above,\n" + f"or an integer between 0 and {num_series - 1} corresponding to one of them." 
+ ) elif isinstance(series, int): return keys[series] else: if series not in keys: - raise ValueError("The series:\n" - f"'{series}'\n" - "is not one of the following:" - f"\n{pprint.pformat(keys)}") + raise ValueError( + "The series:\n" + f"'{series}'\n" + "is not one of the following:" + f"\n{pprint.pformat(keys)}" + ) return series diff --git a/medio/utils/two_way_dict.py b/medio/utils/two_way_dict.py index 27478ce..8ce2bbd 100644 --- a/medio/utils/two_way_dict.py +++ b/medio/utils/two_way_dict.py @@ -1,5 +1,6 @@ class TwoWayDict(dict): """Dictionary which contains key-value + value-key pairs: {key: value, value: key}""" + def __setitem__(self, key, value): # Remove any previous connections with these values if key in self: diff --git a/tests/itk_dcm_orientations/itk_dcm_orientations.py b/tests/itk_dcm_orientations/itk_dcm_orientations.py index 503a31e..ed1767f 100644 --- a/tests/itk_dcm_orientations/itk_dcm_orientations.py +++ b/tests/itk_dcm_orientations/itk_dcm_orientations.py @@ -5,13 +5,19 @@ import pandas as pd from tests.itk_dcm_orientations.orientations_utils import ( - ornt_list, ornt_direction_dict, is_right_handed_ornt, direction2ornt + ornt_list, + ornt_direction_dict, + is_right_handed_ornt, + direction2ornt, +) +from tests.itk_dcm_orientations.itk_utils import ( + set_image_direction, + get_image_direction, ) -from tests.itk_dcm_orientations.itk_utils import set_image_direction, get_image_direction def get_saved_ornt(image, desired_ornt, remove=True): - filename = desired_ornt + '.dcm' + filename = desired_ornt + ".dcm" desired_direction = ornt_direction_dict[desired_ornt] set_image_direction(image, desired_direction) itk.imwrite(image, filename) @@ -27,41 +33,48 @@ def get_saved_ornt(image, desired_ornt, remove=True): return saved_ornt, is_equal -arr = np.random.randint(0, 256, size=(10, 30, 25), dtype='uint8') # (slices, cols, rows) -arr_rgb = np.random.randint(0, 256, size=(10, 30, 25, 3), dtype='uint8') # (slices, cols, rows, channels) +arr = np.random.randint( + 0, 256, size=(10, 30, 25), dtype="uint8" +) # (slices, cols, rows) +arr_rgb = np.random.randint( + 0, 256, size=(10, 30, 25, 3), dtype="uint8" +) # (slices, cols, rows, channels) img = itk.image_from_array(arr) img_rgb = itk.image_from_array(arr_rgb, is_vector=True) df = pd.DataFrame( - columns= - [ - 'Orientation', - 'Right/Left-handed orientation', - 'Success', - 'Saved orientation', - 'RGB success', - 'RGB saved orientation', - ] + columns=[ + "Orientation", + "Right/Left-handed orientation", + "Success", + "Saved orientation", + "RGB success", + "RGB saved orientation", + ] ) for ornt in ornt_list: state_dict = dict.fromkeys(df.columns) - state_dict['Orientation'] = ornt - state_dict['Right/Left-handed orientation'] = 'R' if is_right_handed_ornt(ornt) else 'L' + state_dict["Orientation"] = ornt + state_dict["Right/Left-handed orientation"] = ( + "R" if is_right_handed_ornt(ornt) else "L" + ) # test 3d dicom - state_dict['Saved orientation'], state_dict['Success'] = get_saved_ornt(img, ornt) + state_dict["Saved orientation"], state_dict["Success"] = get_saved_ornt(img, ornt) # test 3d RGB dicom - state_dict['RGB saved orientation'], state_dict['RGB success'] = get_saved_ornt(img_rgb, ornt) + state_dict["RGB saved orientation"], state_dict["RGB success"] = get_saved_ornt( + img_rgb, ornt + ) df = df.append(state_dict, ignore_index=True) -df.sort_values('Right/Left-handed orientation', ascending=False, inplace=True) -df.to_csv('itk_dcm_orientations.csv', index=False) +df.sort_values("Right/Left-handed 
orientation", ascending=False, inplace=True) +df.to_csv("itk_dcm_orientations.csv", index=False) -right_handed = np.array(df['Right/Left-handed orientation'] == 'R') -success = np.array(df['Success']) -rgb_success = np.array(df['RGB success']) -rai_ornt = np.array(df['Orientation'] == 'RAI') +right_handed = np.array(df["Right/Left-handed orientation"] == "R") +success = np.array(df["Success"]) +rgb_success = np.array(df["RGB success"]) +rai_ornt = np.array(df["Orientation"] == "RAI") -print('Right-handed == Success:', np.array_equal(right_handed, success)) -print('Right-handed == RGB success:', np.array_equal(right_handed, rgb_success)) +print("Right-handed == Success:", np.array_equal(right_handed, success)) +print("Right-handed == RGB success:", np.array_equal(right_handed, rgb_success)) print('"RAI" orientation == RGB success:', np.array_equal(rai_ornt, rgb_success)) diff --git a/tests/itk_dcm_orientations/itk_utils.py b/tests/itk_dcm_orientations/itk_utils.py index 52f7095..872d110 100644 --- a/tests/itk_dcm_orientations/itk_utils.py +++ b/tests/itk_dcm_orientations/itk_utils.py @@ -3,7 +3,9 @@ def set_image_direction(image, direction): dim = 3 - direction_vnl_mat = itk.vnl_matrix_from_array(direction.astype('float').copy()) # copy is crucial for the float + direction_vnl_mat = itk.vnl_matrix_from_array( + direction.astype("float").copy() + ) # copy is crucial for the float direction_itk = itk.Matrix[itk.D, dim, dim](direction_vnl_mat) image.SetDirection(direction_itk) diff --git a/tests/itk_dcm_orientations/orientations_utils.py b/tests/itk_dcm_orientations/orientations_utils.py index 025cbca..2812c9c 100644 --- a/tests/itk_dcm_orientations/orientations_utils.py +++ b/tests/itk_dcm_orientations/orientations_utils.py @@ -2,13 +2,14 @@ import numpy as np -letter_vec_dict = {'R': [1, 0, 0], - 'L': [-1, 0, 0], - 'A': [0, 1, 0], - 'P': [0, -1, 0], - 'I': [0, 0, 1], - 'S': [0, 0, -1] - } +letter_vec_dict = { + "R": [1, 0, 0], + "L": [-1, 0, 0], + "A": [0, 1, 0], + "P": [0, -1, 0], + "I": [0, 0, 1], + "S": [0, 0, -1], +} def ornt2direction(ornt): @@ -16,16 +17,15 @@ def ornt2direction(ornt): ornt_iter = itertools.chain( - *map(itertools.permutations, - itertools.product(('R', 'L'), ('A', 'P'), ('I', 'S')) - )) + *map(itertools.permutations, itertools.product(("R", "L"), ("A", "P"), ("I", "S"))) +) # dictionary that translates itk orientation codes to direction matrices ornt_direction_dict = dict() ornt_list = [] for ornt_tup in ornt_iter: - ornt = ''.join(ornt_tup) + ornt = "".join(ornt_tup) ornt_list += [ornt] ornt_direction_dict[ornt] = ornt2direction(ornt) @@ -41,4 +41,4 @@ def direction2ornt(direction): for key, val in ornt_direction_dict.items(): if np.array_equal(direction, val): return key - raise ValueError('Invalid direction') + raise ValueError("Invalid direction") From d593dd226e32c9ae2df9cb298bf2ae800b2c5b9b Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 20 Feb 2022 22:21:49 +0200 Subject: [PATCH 3/5] Poetry update --- poetry.lock | 84 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 61 insertions(+), 23 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8b34aab..a06b754 100644 --- a/poetry.lock +++ b/poetry.lock @@ -23,7 +23,7 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "click" -version = "8.0.3" +version = "8.0.4" description = "Composable command line interface toolkit" category = "dev" optional = false @@ -59,7 +59,7 @@ test = ["coverage", "pytest"] [[package]] name = "importlib-metadata" -version = "4.10.1" +version = "4.11.1" 
description = "Read metadata from Python packages" category = "dev" optional = false @@ -72,7 +72,7 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] [[package]] name = "itk" @@ -167,7 +167,7 @@ python-versions = "*" [[package]] name = "nibabel" -version = "3.2.1" +version = "3.2.2" description = "Access a multitude of neuroimaging data formats" category = "main" optional = false @@ -178,15 +178,23 @@ numpy = ">=1.14" packaging = ">=14.3" [package.extras] -all = ["pydicom (>=0.9.9)", "pillow", "gitpython", "twine", "matplotlib (>=1.5.3)", "numpydoc", "sphinx (>=0.3,<3)", "texext", "h5py", "scipy", "flake8", "coverage", "pytest (!=5.3.4)", "pytest-cov", "pytest-doctestplus"] +all = ["pydicom (>=1.0.0)", "pillow", "gitpython", "twine", "matplotlib (>=1.5.3)", "numpydoc", "sphinx (>=0.3,<3)", "texext", "h5py", "scipy", "flake8", "coverage", "pytest (!=5.3.4)", "pytest-cov", "pytest-doctestplus (!=0.9.0)", "pytest (<7)"] dev = ["gitpython", "twine"] -dicom = ["pydicom (>=0.9.9)"] -dicomfs = ["pydicom (>=0.9.9)", "pillow"] +dicom = ["pydicom (>=1.0.0)"] +dicomfs = ["pydicom (>=1.0.0)", "pillow"] doc = ["matplotlib (>=1.5.3)", "numpydoc", "sphinx (>=0.3,<3)", "texext"] minc2 = ["h5py"] spm = ["scipy"] style = ["flake8"] -test = ["coverage", "pytest (!=5.3.4)", "pytest-cov", "pytest-doctestplus"] +test = ["coverage", "pytest (!=5.3.4)", "pytest-cov", "pytest-doctestplus (!=0.9.0)", "pytest (<7)"] + +[[package]] +name = "numpy" +version = "1.21.5" +description = "NumPy is the fundamental package for array computing with Python." +category = "main" +optional = false +python-versions = ">=3.7,<3.11" [[package]] name = "numpy" @@ -217,7 +225,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [[package]] name = "platformdirs" -version = "2.4.1" +version = "2.5.1" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
category = "dev" optional = false @@ -248,7 +256,7 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "tomli" -version = "2.0.0" +version = "2.0.1" description = "A lil' TOML parser" category = "dev" optional = false @@ -264,7 +272,7 @@ python-versions = ">=3.6" [[package]] name = "typing-extensions" -version = "4.0.1" +version = "4.1.1" description = "Backported and Experimental Type Hints for Python 3.6+" category = "dev" optional = false @@ -314,8 +322,8 @@ black = [ {file = "black-22.1.0.tar.gz", hash = "sha256:a7c0192d35635f6fc1174be575cb7915e92e5dd629ee79fdaf0dcfa41a80afb5"}, ] click = [ - {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, - {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, + {file = "click-8.0.4-py3-none-any.whl", hash = "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1"}, + {file = "click-8.0.4.tar.gz", hash = "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"}, ] colorama = [ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, @@ -326,8 +334,8 @@ dicom-numpy = [ {file = "dicom_numpy-0.6.2.tar.gz", hash = "sha256:24b993083368efb868ffe5edcab054db5c11f0587a218a6b6492fde14a87acd9"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, - {file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, + {file = "importlib_metadata-4.11.1-py3-none-any.whl", hash = "sha256:e0bc84ff355328a4adfc5240c4f211e0ab386f80aa640d1b11f0618a1d282094"}, + {file = "importlib_metadata-4.11.1.tar.gz", hash = "sha256:175f4ee440a0317f6e8d81b7f8d4869f93316170a65ad2b007d2929186c8052c"}, ] itk = [ {file = "itk-5.2.1.post1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d18723ca6791fc5d9c7498e03d73929df56acffd9290ed8f61a24f25a138951e"}, @@ -467,10 +475,40 @@ mypy-extensions = [ {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] nibabel = [ - {file = "nibabel-3.2.1-py3-none-any.whl", hash = "sha256:7e26cbf60eae8668785fa970294f05f767cefc5538b9e22aa388a07f62c54ebc"}, - {file = "nibabel-3.2.1.tar.gz", hash = "sha256:4d2ff9426b740011a1c916b54fc25da9348282e727eaa2ea163f42e00f1fc29e"}, + {file = "nibabel-3.2.2-py3-none-any.whl", hash = "sha256:7df7a2733461441d3aacc61f36f5e100ec533d43ed09a191293bb4ca5a4f10f6"}, + {file = "nibabel-3.2.2.tar.gz", hash = "sha256:b0dcc174b30405ce9e8fec1eab3cbbb20f5c5e4920976c08b22e050b7c124f94"}, ] numpy = [ + {file = "numpy-1.21.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:301e408a052fdcda5cdcf03021ebafc3c6ea093021bf9d1aa47c54d48bdad166"}, + {file = "numpy-1.21.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7e8f6216f180f3fd4efb73de5d1eaefb5f5a1ee5b645c67333033e39440e63a"}, + {file = "numpy-1.21.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc7a7d7b0ed72589fd8b8486b9b42a564f10b8762be8bd4d9df94b807af4a089"}, + {file = "numpy-1.21.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58ca1d7c8aef6e996112d0ce873ac9dfa1eaf4a1196b4ff7ff73880a09923ba7"}, + {file = "numpy-1.21.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dc4b2fb01f1b4ddbe2453468ea0719f4dbb1f5caa712c8b21bb3dd1480cd30d9"}, + {file = "numpy-1.21.5-cp310-cp310-win_amd64.whl", hash = "sha256:cc1b30205d138d1005adb52087ff45708febbef0e420386f58664f984ef56954"}, + {file = "numpy-1.21.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:08de8472d9f7571f9d51b27b75e827f5296295fa78817032e84464be8bb905bc"}, + {file = "numpy-1.21.5-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4fe6a006557b87b352c04596a6e3f12a57d6e5f401d804947bd3188e6b0e0e76"}, + {file = "numpy-1.21.5-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3d893b0871322eaa2f8c7072cdb552d8e2b27645b7875a70833c31e9274d4611"}, + {file = "numpy-1.21.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:341dddcfe3b7b6427a28a27baa59af5ad51baa59bfec3264f1ab287aa3b30b13"}, + {file = "numpy-1.21.5-cp37-cp37m-win32.whl", hash = "sha256:ca9c23848292c6fe0a19d212790e62f398fd9609aaa838859be8459bfbe558aa"}, + {file = "numpy-1.21.5-cp37-cp37m-win_amd64.whl", hash = "sha256:025b497014bc33fc23897859350f284323f32a2fff7654697f5a5fc2a19e9939"}, + {file = "numpy-1.21.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a5098df115340fb17fc93867317a947e1dcd978c3888c5ddb118366095851f8"}, + {file = "numpy-1.21.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:311283acf880cfcc20369201bd75da907909afc4666966c7895cbed6f9d2c640"}, + {file = "numpy-1.21.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b545ebadaa2b878c8630e5bcdb97fc4096e779f335fc0f943547c1c91540c815"}, + {file = "numpy-1.21.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c5562bcc1a9b61960fc8950ade44d00e3de28f891af0acc96307c73613d18f6e"}, + {file = "numpy-1.21.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eed2afaa97ec33b4411995be12f8bdb95c87984eaa28d76cf628970c8a2d689a"}, + {file = "numpy-1.21.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61bada43d494515d5b122f4532af226fdb5ee08fe5b5918b111279843dc6836a"}, + {file = "numpy-1.21.5-cp38-cp38-win32.whl", hash = "sha256:7b9d6b14fc9a4864b08d1ba57d732b248f0e482c7b2ff55c313137e3ed4d8449"}, + {file = "numpy-1.21.5-cp38-cp38-win_amd64.whl", hash = "sha256:dbce7adeb66b895c6aaa1fad796aaefc299ced597f6fbd9ceddb0dd735245354"}, + {file = "numpy-1.21.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:507c05c7a37b3683eb08a3ff993bd1ee1e6c752f77c2f275260533b265ecdb6c"}, + {file = "numpy-1.21.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:00c9fa73a6989895b8815d98300a20ac993c49ac36c8277e8ffeaa3631c0dbbb"}, + {file = "numpy-1.21.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69a5a8d71c308d7ef33ef72371c2388a90e3495dbb7993430e674006f94797d5"}, + {file = "numpy-1.21.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2d8adfca843bc46ac199a4645233f13abf2011a0b2f4affc5c37cd552626f27b"}, + {file = "numpy-1.21.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c293d3c0321996cd8ffe84215ffe5d269fd9d1d12c6f4ffe2b597a7c30d3e593"}, + {file = "numpy-1.21.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c978544be9e04ed12016dd295a74283773149b48f507d69b36f91aa90a643e5"}, + {file = "numpy-1.21.5-cp39-cp39-win32.whl", hash = "sha256:2a9add27d7fc0fdb572abc3b2486eb3b1395da71e0254c5552b2aad2a18b5441"}, + {file = "numpy-1.21.5-cp39-cp39-win_amd64.whl", hash = "sha256:1964db2d4a00348b7a60ee9d013c8cb0c566644a589eaa80995126eac3b99ced"}, + {file = 
"numpy-1.21.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a7c4b701ca418cd39e28ec3b496e6388fe06de83f5f0cb74794fa31cfa384c02"}, + {file = "numpy-1.21.5.zip", hash = "sha256:6a5928bc6241264dce5ed509e66f33676fc97f464e7a919edc672fb5532221ee"}, {file = "numpy-1.22.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:515a8b6edbb904594685da6e176ac9fbea8f73a5ebae947281de6613e27f1956"}, {file = "numpy-1.22.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76a4f9bce0278becc2da7da3b8ef854bed41a991f4226911a24a9711baad672c"}, {file = "numpy-1.22.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:168259b1b184aa83a514f307352c25c56af111c269ffc109d9704e81f72e764b"}, @@ -500,8 +538,8 @@ pathspec = [ {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, ] platformdirs = [ - {file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"}, - {file = "platformdirs-2.4.1.tar.gz", hash = "sha256:440633ddfebcc36264232365d7840a970e75e1018d15b4327d11f91909045fda"}, + {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"}, + {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, ] pydicom = [ {file = "pydicom-2.2.2-py3-none-any.whl", hash = "sha256:6ecb9c6d56a20b2104099b8ef8fe0f3664d797b08a0e0548fe0311b515b32308"}, @@ -512,8 +550,8 @@ pyparsing = [ {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, ] tomli = [ - {file = "tomli-2.0.0-py3-none-any.whl", hash = "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224"}, - {file = "tomli-2.0.0.tar.gz", hash = "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"}, + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] typed-ast = [ {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"}, @@ -542,8 +580,8 @@ typed-ast = [ {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, ] typing-extensions = [ - {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, - {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, + {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"}, + {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"}, ] zipp = [ {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, From 9b8f140508177acccb8a209a6ff81f85285ce707 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 6 Mar 2022 00:08:05 +0200 Subject: [PATCH 4/5] Remove an unnecessary comment --- medio/backends/itk_io.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/medio/backends/itk_io.py b/medio/backends/itk_io.py index 989e39c..498035b 100644 --- 
a/medio/backends/itk_io.py +++ b/medio/backends/itk_io.py @@ -58,9 +58,7 @@ def read_img( if (desired_axcodes is None) or (desired_axcodes == metadata.ornt): image_np = ItkIO.itk_img_to_array(img) else: - orig_ornt = ( - metadata.ornt - ) # store the original orientation before the reorientation + orig_ornt = metadata.ornt img, _ = ItkIO.reorient(img, desired_axcodes) image_np, affine = ItkIO.unpack_img(img) metadata = MetaData( From 49f4ad15849395635483c895ff1fc39fd7aee12c Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 6 Mar 2022 00:18:41 +0200 Subject: [PATCH 5/5] Move some comments above Avoid black's line breaks --- medio/backends/itk_io.py | 15 ++++++++------- medio/backends/pdcm_io.py | 5 ++--- medio/backends/pdcm_unpack_ds.py | 5 ++--- medio/metadata/affine.py | 5 ++--- .../itk_dcm_orientations/itk_dcm_orientations.py | 10 ++++------ tests/itk_dcm_orientations/itk_utils.py | 5 ++--- 6 files changed, 20 insertions(+), 25 deletions(-) diff --git a/medio/backends/itk_io.py b/medio/backends/itk_io.py index 498035b..1557c2a 100644 --- a/medio/backends/itk_io.py +++ b/medio/backends/itk_io.py @@ -205,10 +205,12 @@ def save_img_file_long(image, filename, compression=False): @staticmethod def itk_img_to_array(img_itk): - """Swap the axes to the usual x, y, z convention in RAI orientation (originally z, y, x)""" - img_array = itk.array_from_image( - img_itk - ).T # the transpose here is equivalent to keep_axes=True + """ + Swap the axes to the usual x, y, z convention in RAI orientation + (originally z, y, x) + """ + # the transpose here is equivalent to keep_axes=True + img_array = itk.array_from_image(img_itk).T return img_array @staticmethod @@ -220,9 +222,8 @@ def array_to_itk_img(img_array, components_axis=None): img_array, components_axis, ItkIO.DEFAULT_COMPONENTS_AXIS ) is_vector = True - img_itk = itk.image_from_array( - img_array.T.copy(), is_vector=is_vector - ) # copy is crucial for the ordering + # copy is crucial for the ordering + img_itk = itk.image_from_array(img_array.T.copy(), is_vector=is_vector) return img_itk @staticmethod diff --git a/medio/backends/pdcm_io.py b/medio/backends/pdcm_io.py index 00b204d..1dad647 100644 --- a/medio/backends/pdcm_io.py +++ b/medio/backends/pdcm_io.py @@ -44,9 +44,8 @@ def read_img( :return: numpy array and metadata """ input_path = Path(input_path) - temp_channels_axis = ( - -1 - ) # if there are channels, they must be in the last axis for the reorientation + # if there are channels, they must be in the last axis for the reorientation + temp_channels_axis = -1 if input_path.is_dir(): img, metadata, channeled = PdcmIO.read_dcm_dir( input_path, diff --git a/medio/backends/pdcm_unpack_ds.py b/medio/backends/pdcm_unpack_ds.py index e938fd4..91b5695 100644 --- a/medio/backends/pdcm_unpack_ds.py +++ b/medio/backends/pdcm_unpack_ds.py @@ -85,9 +85,8 @@ def _unpack_pixel_array(dataset, rescale=None): rescale = _requires_rescaling(dataset) if rescale: - voxels = voxels.astype( - "int16", copy=False - ) # TODO: it takes time! Consider view. + # TODO: it takes time! Consider view. 
+ voxels = voxels.astype("int16", copy=False) slope = getattr(dataset, "RescaleSlope", 1) intercept = getattr(dataset, "RescaleIntercept", 0) if int(slope) == slope and int(intercept) == intercept: diff --git a/medio/metadata/affine.py b/medio/metadata/affine.py index a450f4d..586340f 100644 --- a/medio/metadata/affine.py +++ b/medio/metadata/affine.py @@ -74,9 +74,8 @@ def spacing(self): def spacing(self, value): value = np.asarray(value) self._m_matrix = self._m_matrix @ np.diag(value / self._spacing) - self._spacing = np.abs( - value - ) # the spacing must be positive (or at least nonnegative) + # the spacing must be positive (or at least nonnegative) + self._spacing = np.abs(value) @property def direction(self): diff --git a/tests/itk_dcm_orientations/itk_dcm_orientations.py b/tests/itk_dcm_orientations/itk_dcm_orientations.py index ed1767f..f1431e8 100644 --- a/tests/itk_dcm_orientations/itk_dcm_orientations.py +++ b/tests/itk_dcm_orientations/itk_dcm_orientations.py @@ -33,12 +33,10 @@ def get_saved_ornt(image, desired_ornt, remove=True): return saved_ornt, is_equal -arr = np.random.randint( - 0, 256, size=(10, 30, 25), dtype="uint8" -) # (slices, cols, rows) -arr_rgb = np.random.randint( - 0, 256, size=(10, 30, 25, 3), dtype="uint8" -) # (slices, cols, rows, channels) +# (slices, cols, rows) +arr = np.random.randint(0, 256, size=(10, 30, 25), dtype="uint8") +# (slices, cols, rows, channels) +arr_rgb = np.random.randint(0, 256, size=(10, 30, 25, 3), dtype="uint8") img = itk.image_from_array(arr) img_rgb = itk.image_from_array(arr_rgb, is_vector=True) diff --git a/tests/itk_dcm_orientations/itk_utils.py b/tests/itk_dcm_orientations/itk_utils.py index 872d110..87c0289 100644 --- a/tests/itk_dcm_orientations/itk_utils.py +++ b/tests/itk_dcm_orientations/itk_utils.py @@ -3,9 +3,8 @@ def set_image_direction(image, direction): dim = 3 - direction_vnl_mat = itk.vnl_matrix_from_array( - direction.astype("float").copy() - ) # copy is crucial for the float + # copy is crucial for the float + direction_vnl_mat = itk.vnl_matrix_from_array(direction.astype("float").copy()) direction_itk = itk.Matrix[itk.D, dim, dim](direction_vnl_mat) image.SetDirection(direction_itk)
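
Note on the pattern applied in PATCH 5/5: black counts a trailing comment toward a statement's line length, so a comment at the end of an already long call can push the line past the default 88-character limit and make black wrap the call over several lines. Moving the comment onto its own line above the statement keeps the call intact, which is what every hunk in that patch does. A minimal, self-contained sketch of the idea (the names below are illustrative and are not taken from medio):

    import numpy as np

    # When a call plus its trailing comment exceed the 88-character limit,
    # black reformats it roughly like this (shown as a comment so the sketch
    # stays runnable):
    #
    #     img_array = np.asarray(
    #         source
    #     ).T  # long explanatory comment that pushed the line over the limit
    #
    # Hoisting the comment above the statement keeps the call on one line:

    source = np.arange(6).reshape(2, 3)
    # the transpose swaps the two axes; the comment no longer lengthens the call line
    img_array = np.asarray(source).T
    print(img_array.shape)  # prints (3, 2)

Either placement is equivalent at runtime; only the comment position changes, which is why the hunks in this patch are formatting-only.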