Add upload_annotation_volume

Add script and unit test

Rename dicom_utils to dicoms_to_volume.
  Add script
  Add unit test
Rename upload_annotations to upload_annotation_slice.
Update README.md
  How to use dicoms_to_volume
  How to use upload_annotation_volume
phcerdan committed Oct 27, 2023
1 parent 99d2d29 commit 00d22ff
Showing 9 changed files with 426 additions and 75 deletions.
28 changes: 25 additions & 3 deletions README.md
@@ -1,6 +1,6 @@
# mdai-utils

Utility functions for MD.ai
Utility functions for MD.ai. Download and upload 2D and 3D segmentation images.

## Download data

@@ -22,16 +22,38 @@ python -m mdai_utils.download_annotations \
-o ./data
```

## Upload annotations / segmentations
## Upload 2D segmentations

```bash
python -m mdai_utils.upload_annotations \
python -m mdai_utils.upload_annotation_slice \
--parameters ./tests/test_local_parameters.json \
--sop_instance_uid "1.2.826.0.1.3680043.2.1125.1.75064541463040.2005072610414630768" \
--label_name mylabel \
-i ./tests/fixtures/humanct_0002_1000_seg.nii.gz
```
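
The same upload can also be scripted from Python. A minimal sketch, assuming `upload_image_annotation_slice` is importable from `mdai_utils.upload_annotation_slice`, that the parameters file carries the keys the CLI reads (`mdai_domain`, `mdai_project_id`, `mdai_dataset_id`, `mdai_label_ids`), and a placeholder access token:

```python
import json

import mdai

from mdai_utils.upload_annotation_slice import upload_image_annotation_slice

with open("./tests/test_local_parameters.json") as f:
    parameters = json.load(f)

mdai_client = mdai.Client(
    domain=parameters["mdai_domain"], access_token="<your-mdai-token>"
)

failed_annotations = upload_image_annotation_slice(
    segmentation_image_path="./tests/fixtures/humanct_0002_1000_seg.nii.gz",
    sop_instance_uid="1.2.826.0.1.3680043.2.1125.1.75064541463040.2005072610414630768",
    mdai_client=mdai_client,
    mdai_project_id=parameters["mdai_project_id"],
    mdai_dataset_id=parameters["mdai_dataset_id"],
    mdai_label_id=parameters["mdai_label_ids"]["mylabel"],
)
print(f"{len(failed_annotations)} annotations failed to upload.")
```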

## Upload 3D segmentations

MD.ai works with DICOM files and uses the SOPInstanceUID as the key to match slices.
Your algorithm might work with 3D volumes (.nrrd, .nii.gz, etc.). You can convert an input DICOM folder to a 3D volume, and also store the mapping between the new volume indices and the original DICOM files, with their SOPInstanceUIDs.

```bash
python -m mdai_utils.dicoms_to_volume -i ./tests/fixtures/humanct_0002_1000_1004 -o /tmp/humanct_0002_1000_1004.nrrd
```

Alongside the output image, a `{image_filename}_SOPInstanceUIDs.json` file will be saved with the slice mappings.
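
For illustration, the mapping file looks roughly like this (the UIDs here are hypothetical, and entries may also carry extra per-slice metadata such as the source DICOM filename):

```json
{
  "0": { "SOPInstanceUID": "1.2.826.0.1.3680043.2.1125.1.0001" },
  "1": { "SOPInstanceUID": "1.2.826.0.1.3680043.2.1125.1.0002" }
}
```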

If you have a 3D volume segmentation to upload, use the mappings:

```bash
python -m mdai_utils.upload_annotation_volume \
--parameters ./tests/test_local_parameters.json \
--sop_instance_uids_file ./tests/fixtures/humanct_0002_1000_1004_SOPInstanceUIDs.json \
--label_name mylabel \
-i ./tests/fixtures/humanct_0002_1000_1004_seg.nii.gz
```
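
Conceptually, the volume upload walks the slices and reuses the 2D upload path. A minimal sketch of that idea (not the exact `upload_annotation_volume` implementation; it assumes the helper functions shown in the diff below and the same parameters file and placeholder token as the 2D example):

```python
import json

import mdai

from mdai_utils.dicoms_to_volume import metadata_dict_to_sop_instance_uids
from mdai_utils.upload_annotation_slice import (
    read_data_image,
    upload_data_annotation_slice,
)

with open("./tests/test_local_parameters.json") as f:
    parameters = json.load(f)
mdai_client = mdai.Client(
    domain=parameters["mdai_domain"], access_token="<your-mdai-token>"
)

# The 3D segmentation as a numpy array; itk puts the slice axis first (z, y, x).
data_np = read_data_image("./tests/fixtures/humanct_0002_1000_1004_seg.nii.gz")

# Map slice indices to the SOPInstanceUIDs of the original DICOM slices.
with open("./tests/fixtures/humanct_0002_1000_1004_SOPInstanceUIDs.json") as f:
    slice_to_uid = metadata_dict_to_sop_instance_uids(json.load(f))

failed_annotations = []
for slice_idx in range(data_np.shape[0]):
    # Each slice is uploaded against its matching DICOM slice.
    failed_annotations.extend(
        upload_data_annotation_slice(
            data_np=data_np[slice_idx],
            sop_instance_uid=slice_to_uid[slice_idx],
            mdai_client=mdai_client,
            mdai_project_id=parameters["mdai_project_id"],
            mdai_dataset_id=parameters["mdai_dataset_id"],
            mdai_label_id=parameters["mdai_label_ids"]["mylabel"],
        )
    )
```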

## Development

For information about building, running, and contributing to this code base,
47 changes: 46 additions & 1 deletion mdai_utils/dicom_utils.py → mdai_utils/dicoms_to_volume.py
@@ -27,7 +27,12 @@ class ImageAndMetadata(NamedTuple):
metadata: dict


def read_dicoms_into_volume(valid_dcm_file_list) -> ImageAndMetadata:
def metadata_dict_to_sop_instance_uids(metadata_dict):
"""Convert a metadata dictionary to a dictionary mapping slice index to SOPInstanceUID."""
return {int(k): v["SOPInstanceUID"] for k, v in metadata_dict.items()}


def dicoms_to_volume(valid_dcm_file_list) -> ImageAndMetadata:
"""Convert a list of DICOM files to a image volume. Also returns metadata
(SOPInstanceUID) for each slice in the volume.
@@ -58,3 +63,43 @@ def read_dicoms_into_volume(valid_dcm_file_list) -> ImageAndMetadata:
for slice_idx, (fn, uid) in enumerate(zip(dicom_names, uids))
}
return ImageAndMetadata(image=image, metadata=metadata)


def _get_parser():
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input_directory",
type=str,
required=True,
help="Path to the directory containing the DICOM files.",
)
parser.add_argument(
"-o",
"--output_image",
type=str,
required=True,
help="Path to the output image.",
)
return parser


def main(input_directory, output_image):
"""
Convert a directory of DICOM files to a volume image, and save the metadata mapping slice indices to SOPInstanceUIDs.
"""
valid_dcm_file_list = file_list_from_directory(input_directory)
image_and_metadata = dicoms_to_volume(valid_dcm_file_list)
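# Per the README, this writes the image and a {image_filename}_SOPInstanceUIDs.json next to it.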
write_image_and_metadata(
image=image_and_metadata.image,
metadata=image_and_metadata.metadata,
output_image_filename=output_image,
)


if __name__ == "__main__":
parser = _get_parser()
args = parser.parse_args()
main(args.input_directory, args.output_image)
mdai_utils/upload_annotations.py → mdai_utils/upload_annotation_slice.py
@@ -3,6 +3,15 @@
import numpy as np


def read_data_image(image_path):
"""
Read an image using itk and return a numpy data array.
"""
image = itk.imread(image_path)
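# itk.array_from_image returns the voxel array indexed as (z, y, x) for 3D images.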
data_np = itk.array_from_image(image)
return data_np


def upload_data_annotation_slice(
data_np: np.ndarray,
sop_instance_uid: str,
@@ -36,15 +45,6 @@ def upload_data_annotation_slice(
return failed_annotations


def read_data_image(image_path):
"""
Read an image using itk and return a numpy data array.
"""
image = itk.imread(image_path)
data_np = itk.array_from_image(image)
return data_np


def upload_image_annotation_slice(
segmentation_image_path: str,
sop_instance_uid: str,
@@ -56,9 +56,10 @@
"""
Uploads an annotation to the server. It requires that the DICOM image is already
uploaded to the server. See upload_dicom_image.py for that.
The input image can be in any format supported by ITK.
Args:
segmentation_image_path (str): Path to the segmentation image. With fixed metadata. See @inverse_transform
segmentation_image_path (str): Path to the segmentation image.
sop_instance_uid (str): SOPInstanceUID of the DICOM key-slice image. Returned from @inverse_transform
mdai_client (mdai.Client): Client to the MD.ai API. See @get_mdai_client
mdai_project_id (str): Project ID. Check in the MD.ai web interface.
@@ -74,11 +75,11 @@ data_np = data_np.squeeze(0)
data_np = data_np.squeeze(0)
return upload_data_annotation_slice(
data_np=data_np,
sop_instance_uid=sop_instance_uid,
mdai_client=mdai_client,
mdai_project_id=mdai_project_id,
mdai_dataset_id=mdai_dataset_id,
mdai_label_id=mdai_label_id,
sop_instance_uid=sop_instance_uid,
)


@@ -106,12 +107,6 @@ def _get_parser():
default=None,
help="sop_instance_uid of the annotation file. Needed to match the annotation with the DICOM image in mdai.",
)
parser.add_argument(
"--sop_instance_metadata",
type=str,
default=None,
help="json file generated by storing the metadata resulting from dicom_utils.read_dicoms_into_volume",
)
parser.add_argument(
"-p",
"--parameters",
@@ -126,6 +121,27 @@
return parser


def main(
input_annotation,
label_name,
sop_instance_uid,
mdai_client,
mdai_project_id,
mdai_dataset_id,
mdai_label_ids,
):
mdai_label_id = mdai_label_ids[label_name]
failed_annotations = upload_image_annotation_slice(
segmentation_image_path=input_annotation,
sop_instance_uid=sop_instance_uid,
mdai_client=mdai_client,
mdai_project_id=mdai_project_id,
mdai_dataset_id=mdai_dataset_id,
mdai_label_id=mdai_label_id,
)
return failed_annotations


if __name__ == "__main__":
import json

@@ -140,30 +156,30 @@ def _get_parser():

mdai_project_id = parameters["mdai_project_id"]
mdai_dataset_id = parameters["mdai_dataset_id"]
mdai_label_group_id = parameters["mdai_label_group_id"]
mdai_label_ids = parameters["mdai_label_ids"]
mdai_domain = parameters["mdai_domain"]

input_annotation = args.input_annotation
label_name = args.label_name
mdai_label_id = mdai_label_ids[label_name]
sop_instance_uid = args.sop_instance_uid
sop_instance_metadata = args.sop_instance_metadata
if sop_instance_uid is None:
raise ValueError(
"Either sop_instance_uid or sop_instance_metadata must be set."
"sop_instance_uid is required to match the annotation with the DICOM image in mdai."
)
token = get_mdai_access_token()
mdai_client = mdai.Client(domain=mdai_domain, access_token=token)

failed_annotations = upload_image_annotation_slice(
segmentation_image_path=input_annotation,
failed_annotations = main(
input_annotation=input_annotation,
label_name=label_name,
sop_instance_uid=sop_instance_uid,
mdai_client=mdai_client,
mdai_project_id=mdai_project_id,
mdai_dataset_id=mdai_dataset_id,
mdai_label_id=mdai_label_id,
mdai_label_ids=mdai_label_ids,
)

if len(failed_annotations) == 0:
print("All annotations uploaded successfully.")
exit(0)