diff --git a/data/test_files/seg_image_grayscale_labeled.dcm b/data/test_files/seg_image_grayscale_labeled.dcm
new file mode 100644
index 00000000..5e718201
Binary files /dev/null and b/data/test_files/seg_image_grayscale_labeled.dcm differ
diff --git a/docs/usage.rst b/docs/usage.rst
index 8bd86bb5..73fe02d3 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -121,7 +121,7 @@ Derive a Segmentation image from a multi-frame Slide Microscopy (SM) image:
     )
 
     # Create the Segmentation instance
-    seg_dataset = Segmentation(
+    seg_dataset = hd.seg.Segmentation(
         source_images=[image_dataset],
         pixel_array=mask,
         segmentation_type=hd.seg.SegmentationTypeValues.BINARY,
diff --git a/src/highdicom/seg/enum.py b/src/highdicom/seg/enum.py
index dcdfd6fa..d14d7a08 100644
--- a/src/highdicom/seg/enum.py
+++ b/src/highdicom/seg/enum.py
@@ -17,6 +17,7 @@ class SegmentationTypeValues(Enum):
 
     BINARY = 'BINARY'
     FRACTIONAL = 'FRACTIONAL'
+    LABELED = 'LABELED'
 
 
 class SegmentationFractionalTypeValues(Enum):
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 581ac851..70550055 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -274,6 +274,8 @@ def __init__(
         if len(source_images) == 0:
             raise ValueError('At least one source image is required.')
 
+        segmentation_type = SegmentationTypeValues(segmentation_type)
+
         uniqueness_criteria = set(
             (
                 image.StudyInstanceUID,
@@ -298,17 +300,46 @@ def __init__(
                 'are multi-frame images.'
             )
         is_tiled = hasattr(src_img, 'TotalPixelMatrixRows')
-        supported_transfer_syntaxes = {
-            ImplicitVRLittleEndian,
-            ExplicitVRLittleEndian,
+
+        supported_compressed_transfer_syntaxes = {
             JPEG2000Lossless,
             JPEGLSLossless,
             RLELossless,
         }
+        supported_native_transfer_syntaxes = {
+            ImplicitVRLittleEndian,
+            ExplicitVRLittleEndian,
+        }
+        supported_transfer_syntaxes = {
+            *supported_compressed_transfer_syntaxes,
+            *supported_native_transfer_syntaxes
+        }
         if transfer_syntax_uid not in supported_transfer_syntaxes:
             raise ValueError(
                 f'Transfer syntax "{transfer_syntax_uid}" is not supported.'
             )
+        if segmentation_type == SegmentationTypeValues.BINARY:
+            if transfer_syntax_uid not in supported_native_transfer_syntaxes:
+                raise ValueError(
+                    f'Transfer syntax "{transfer_syntax_uid}" is not supported '
+                    'for segmentation type BINARY. '
+                    'Supported are "{}"'.format(
+                        '", "'.join(supported_native_transfer_syntaxes)
+                    )
+                )
+        if segmentation_type == SegmentationTypeValues.LABELED:
+            if (
+                pixel_array.dtype.itemsize > 2 and
+                transfer_syntax_uid not in supported_native_transfer_syntaxes
+            ):
+                raise ValueError(
+                    f'Transfer syntax "{transfer_syntax_uid}" is not supported '
+                    'for segmentation type LABELED with more than 16 bits '
+                    'allocated per sample. '
+                    'Supported are "{}"'.format(
+                        '", "'.join(supported_native_transfer_syntaxes)
+                    )
+                )
 
         if pixel_array.ndim == 2:
             pixel_array = pixel_array[np.newaxis, ...]
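Note for reviewers on the hunk above: the supported transfer syntaxes are now split into a native and a compressed set, BINARY is restricted to the native set, and LABELED is restricted to the native set whenever the pixel array uses more than 16 bits per sample. A minimal standalone sketch of that rule follows, for readability only; the helper function and the NATIVE/COMPRESSED names are illustrative and are not part of the patch.

    from pydicom.uid import (
        ExplicitVRLittleEndian,
        ImplicitVRLittleEndian,
        JPEG2000Lossless,
        JPEGLSLossless,
        RLELossless,
    )

    # Illustrative constants mirroring the sets defined in __init__ above
    NATIVE = {ImplicitVRLittleEndian, ExplicitVRLittleEndian}
    COMPRESSED = {JPEG2000Lossless, JPEGLSLossless, RLELossless}

    def check_transfer_syntax(segmentation_type: str, itemsize: int, uid: str) -> None:
        """Sketch of the validation added to Segmentation.__init__ above."""
        if uid not in NATIVE | COMPRESSED:
            raise ValueError(f'Transfer syntax "{uid}" is not supported.')
        if segmentation_type == 'BINARY' and uid not in NATIVE:
            raise ValueError('BINARY requires an uncompressed transfer syntax.')
        if segmentation_type == 'LABELED' and itemsize > 2 and uid not in NATIVE:
            raise ValueError(
                'LABELED with more than 16 bits per sample requires an '
                'uncompressed transfer syntax.'
            )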
@@ -442,11 +473,9 @@ def __init__(
             self.ContentCreatorIdentificationCodeSequence = \
                 content_creator_identification
 
-        segmentation_type = SegmentationTypeValues(segmentation_type)
         self.SegmentationType = segmentation_type.value
         if self.SegmentationType == SegmentationTypeValues.BINARY.value:
             self.BitsAllocated = 1
-            self.HighBit = 0
             if self.file_meta.TransferSyntaxUID.is_encapsulated:
                 raise ValueError(
                     'The chosen transfer syntax '
@@ -455,22 +484,30 @@ def __init__(
                 )
         elif self.SegmentationType == SegmentationTypeValues.FRACTIONAL.value:
             self.BitsAllocated = 8
-            self.HighBit = 7
             segmentation_fractional_type = SegmentationFractionalTypeValues(
                 fractional_type
             )
             self.SegmentationFractionalType = segmentation_fractional_type.value
             if max_fractional_value > 2**8:
                 raise ValueError(
-                    'Maximum fractional value must not exceed image bit depth.'
+                    'Maximum fractional value must not exceed '
+                    'the number of allocated bits.'
                 )
             self.MaximumFractionalValue = max_fractional_value
+        elif self.SegmentationType == SegmentationTypeValues.LABELED.value:
+            bit_depth = pixel_array.dtype.itemsize * 8
+            if bit_depth not in (8, 16, 32):
+                raise ValueError(
+                    'The number of allocated bits must be either 8, 16, or 32.'
+                )
+            self.BitsAllocated = bit_depth
         else:
             raise ValueError(
-                'Unknown segmentation type "{}"'.format(segmentation_type)
+                f'Unknown segmentation type "{segmentation_type}".'
             )
         self.BitsStored = self.BitsAllocated
+        self.HighBit = self.BitsAllocated - 1
 
         self.LossyImageCompression = getattr(
             src_img,
             'LossyImageCompression',
@@ -753,7 +790,7 @@ def __init__(
         for i, segment_number in enumerate(described_segment_numbers):
 
             # Pixel array for just this segment
-            if pixel_array.dtype in (np.float_, np.float32, np.float64):
+            if pixel_array.dtype.kind == 'f':
                 # Floating-point numbers must be mapped to 8-bit integers in
                 # the range [0, max_fractional_value].
                 if pixel_array.ndim == 4:
@@ -764,27 +801,34 @@ def __init__(
                     segment_array * float(self.MaximumFractionalValue)
                 )
                 planes = planes.astype(np.uint8)
-            elif pixel_array.dtype in (np.uint8, np.uint16):
-                # Note that integer arrays with segments stacked down the last
-                # dimension will already have been converted to bool, leaving
-                # only "label maps" here, which must be converted to binary
-                # masks.
-                planes = np.zeros(pixel_array.shape, dtype=np.uint8)
-                planes[pixel_array == segment_number] = 1
-            elif pixel_array.dtype == np.bool_:
+            elif pixel_array.dtype.kind in 'u' or pixel_array.dtype == np.bool_:
                 if pixel_array.ndim == 4:
                     planes = pixel_array[:, :, :, segment_number - 1]
                 else:
-                    planes = pixel_array
-                planes = planes.astype(np.uint8)
-                # It may happen that a boolean array is passed that should be
-                # interpreted as fractional segmentation type. In this case, we
-                # also need to stretch pixel valeus to 8-bit unsigned integer
-                # range by multiplying with the maximum fractional value.
+                    if segmentation_type == SegmentationTypeValues.BINARY:
+                        # Note that integer arrays with segments stacked down
+                        # the last dimension will already have been converted
+                        # to bool, leaving only "label maps" here, which must
+                        # be converted to binary masks.
+                        planes = np.zeros(pixel_array.shape, dtype=np.uint8)
+                        planes[pixel_array == segment_number] = 1
+                    else:
+                        planes = pixel_array
                 if segmentation_type == SegmentationTypeValues.FRACTIONAL:
-                    planes *= int(self.MaximumFractionalValue)
+                    # It may happen that a boolean array is passed that
+                    # should be interpreted as fractional segmentation
+                    # type. In this case, we also need to stretch pixel
+                    # values to 8-bit unsigned integer range by multiplying
+                    # with the maximum fractional value.
+                    planes = planes.astype(np.float32)
+                    planes *= np.float32(self.MaximumFractionalValue)
+                    planes = planes.astype(np.uint8)
             else:
-                raise TypeError('Pixel array has an invalid data type.')
+                raise TypeError(
+                    'Pixel array has an invalid data type. '
+                    'Data type must be either numpy.bool_, numpy.float32, '
+                    'numpy.float64, numpy.uint8, numpy.uint16, or numpy.uint32.'
+                )
 
             contained_plane_index = []
             for j in plane_sort_index:
@@ -1038,48 +1082,58 @@ def _check_and_cast_pixel_array(
                 f'({len(described_segment_numbers)}).'
             )
 
-        if pixel_array.dtype in (np.bool_, np.uint8, np.uint16):
+        if pixel_array.dtype.kind == 'u' or pixel_array.dtype == np.bool_:
             if pixel_array.ndim == 3:
-                # A label-map style array where pixel values represent
-                # segment associations
-                segments_present = np.unique(
-                    pixel_array[pixel_array > 0].astype(np.uint16)
-                )
-
-                # The pixel values in the pixel array must all belong to
-                # a described segment
-                if not np.all(
+                if segmentation_type == SegmentationTypeValues.BINARY:
+                    # A label-map style array where pixel values represent
+                    # segment associations
+                    segments_present = np.unique(
+                        pixel_array[pixel_array > 0].astype(np.uint16)
+                    )
+                    # The pixel values in the pixel array must all belong to
+                    # a described segment
+                    if not np.all(
                         np.in1d(segments_present, described_segment_numbers)
                 ):
-                    raise ValueError(
-                        'Pixel array contains segments that lack '
-                        'descriptions.'
-                    )
+                        raise ValueError(
+                            'Pixel array contains segments that lack '
+                            'descriptions.'
+                        )
+                elif segmentation_type == SegmentationTypeValues.LABELED:
+                    if pixel_array.max() >= 2 ** 32:
+                        raise ValueError(
+                            'When passing segments for segmentation type '
+                            'LABELED with an integer data type, '
+                            'pixels must not exceed 32-bit depth.'
+                        )
 
                 # By construction of the pixel array, we know that the segments
                 # cannot overlap
                 segments_overlap = SegmentsOverlapValues.NO
+
             else:
-                # Pixel array is 4D where each segment is stacked down
-                # the last dimension
-                # In this case, each segment of the pixel array should be binary
-                if pixel_array.max() > 1:
-                    raise ValueError(
-                        'When passing a 4D stack of segments with an integer '
-                        'pixel type, the pixel array must be binary.'
-                    )
-                pixel_array = pixel_array.astype(np.bool_)
+                if segmentation_type == SegmentationTypeValues.BINARY:
+                    # Pixel array is 4D where each segment is stacked down the
+                    # last dimension. In this case, each segment of the pixel
+                    # array should be binary.
+                    if pixel_array.max() > 1:
+                        raise ValueError(
+                            'When passing a 4D stack of segments for '
+                            'segmentation type BINARY with an integer data '
+                            'type, the pixel array must be binary.'
+                        )
+                    pixel_array = pixel_array.astype(np.bool_)
 
                 # Need to check whether or not segments overlap
                 if pixel_array.shape[-1] == 1:
                     # A single segment does not overlap
                     segments_overlap = SegmentsOverlapValues.NO
-                elif pixel_array.sum(axis=-1).max() > 1:
+                elif (pixel_array > 0).sum(axis=-1).max() > 1:
                     segments_overlap = SegmentsOverlapValues.YES
                 else:
                     segments_overlap = SegmentsOverlapValues.NO
-        elif (pixel_array.dtype in (np.float_, np.float32, np.float64)):
+        elif pixel_array.dtype.kind == 'f':
             unique_values = np.unique(pixel_array)
             if np.min(unique_values) < 0.0 or np.max(unique_values) > 1.0:
                 raise ValueError(
diff --git a/tests/test_seg.py b/tests/test_seg.py
index c7547ba8..6b9b8bfd 100644
--- a/tests/test_seg.py
+++ b/tests/test_seg.py
@@ -624,11 +624,11 @@ def setUp(self):
         self._ct_image = dcmread(
             str(data_dir.joinpath('test_files', 'ct_image.dcm'))
         )
-        self._ct_pixel_array = np.zeros(
+        self._ct_mask_array = np.zeros(
            self._ct_image.pixel_array.shape,
            dtype=bool
        )
-        self._ct_pixel_array[1:5, 10:15] = True
+        self._ct_mask_array[1:5, 10:15] = True
 
         # A single CR image
         self._cr_image = dcmread(
@@ -644,17 +644,29 @@ def setUp(self):
             axis=2
         )[None, :]
 
-        # A microscopy image
-        self._sm_image = dcmread(
+        # A color slide microscopy image
+        self._sm_color = dcmread(
             str(data_dir.joinpath('test_files', 'sm_image.dcm'))
         )
-        # Override te existing ImageOrientationSlide to make the frame ordering
+        # Override the existing ImageOrientationSlide to make the frame ordering
         # simpler for the tests
-        self._sm_pixel_array = np.zeros(
-            self._sm_image.pixel_array.shape[:3],  # remove colour channel axis
+        self._sm_mask_array = np.zeros(
+            self._sm_color.pixel_array.shape[:3],  # remove colour channel axis
             dtype=bool
         )
-        self._sm_pixel_array[2:3, 1:5, 7:9] = True
+        self._sm_mask_array[2, 1:5, 7:9] = True
+
+        # A grayscale slide microscopy image
+        self._sm_grayscale = dcmread(
+            str(data_dir.joinpath('test_files', 'sm_image_grayscale.dcm'))
+        )
+        self._sm_grayscale_mask_array = np.zeros(
+            self._sm_grayscale.pixel_array.shape,
+            dtype=np.uint32
+        )
+        self._sm_grayscale_mask_array[0, 1:5, 7:9] = 1
+        self._sm_grayscale_mask_array[1, 3:7, 2:4] = 2
+        self._sm_grayscale_mask_array[1, 1:5, 7:9] = 3
 
         # A series of single frame CT images
         ct_series = [
@@ -775,7 +787,7 @@ def check_dimension_index_vals(seg):
     def test_construction(self):
         instance = Segmentation(
             [self._ct_image],
-            self._ct_pixel_array,
+            self._ct_mask_array,
             SegmentationTypeValues.FRACTIONAL.value,
             self._segment_descriptions,
             self._series_instance_uid,
@@ -873,8 +885,8 @@ def test_construction(self):
 
     def test_construction_2(self):
         instance = Segmentation(
-            [self._sm_image],
-            self._sm_pixel_array,
+            [self._sm_color],
+            self._sm_mask_array,
             SegmentationTypeValues.FRACTIONAL.value,
             self._segment_descriptions,
             self._series_instance_uid,
@@ -886,43 +898,43 @@ def test_construction_2(self):
             self._software_versions,
             self._device_serial_number
         )
-        assert instance.PatientID == self._sm_image.PatientID
-        assert instance.AccessionNumber == self._sm_image.AccessionNumber
+        assert instance.PatientID == self._sm_color.PatientID
+        assert instance.AccessionNumber == self._sm_color.AccessionNumber
         assert instance.ContainerIdentifier == \
-            self._sm_image.ContainerIdentifier
+            self._sm_color.ContainerIdentifier
         assert instance.SpecimenDescriptionSequence[0].SpecimenUID == \
-            self._sm_image.SpecimenDescriptionSequence[0].SpecimenUID
+            self._sm_color.SpecimenDescriptionSequence[0].SpecimenUID
         assert len(instance.SegmentSequence) == 1
         assert instance.SegmentSequence[0].SegmentNumber == 1
         assert len(instance.SourceImageSequence) == 1
         ref_item = instance.SourceImageSequence[0]
         assert ref_item.ReferencedSOPInstanceUID == \
-            self._sm_image.SOPInstanceUID
-        assert instance.Rows == self._sm_image.pixel_array.shape[1]
-        assert instance.Columns == self._sm_image.pixel_array.shape[2]
+            self._sm_color.SOPInstanceUID
+        assert instance.Rows == self._sm_color.pixel_array.shape[1]
+        assert instance.Columns == self._sm_color.pixel_array.shape[2]
         assert instance.TotalPixelMatrixRows == \
-            self._sm_image.TotalPixelMatrixRows
+            self._sm_color.TotalPixelMatrixRows
         assert instance.TotalPixelMatrixColumns == \
-            self._sm_image.TotalPixelMatrixColumns
+            self._sm_color.TotalPixelMatrixColumns
         assert len(instance.SharedFunctionalGroupsSequence) == 1
         shared_item = instance.SharedFunctionalGroupsSequence[0]
         assert len(shared_item.PixelMeasuresSequence) == 1
         pm_item = shared_item.PixelMeasuresSequence[0]
-        src_shared_item = self._sm_image.SharedFunctionalGroupsSequence[0]
+        src_shared_item = self._sm_color.SharedFunctionalGroupsSequence[0]
         src_pm_item = src_shared_item.PixelMeasuresSequence[0]
         assert pm_item.PixelSpacing == src_pm_item.PixelSpacing
         assert pm_item.SliceThickness == src_pm_item.SliceThickness
         assert len(shared_item.PlaneOrientationSequence) == 1
         assert instance.ImageOrientationSlide == \
-            self._sm_image.ImageOrientationSlide
+            self._sm_color.ImageOrientationSlide
         assert instance.TotalPixelMatrixOriginSequence == \
-            self._sm_image.TotalPixelMatrixOriginSequence
+            self._sm_color.TotalPixelMatrixOriginSequence
         assert len(instance.DimensionOrganizationSequence) == 1
         assert len(instance.DimensionIndexSequence) == 6
 
         # Number of frames should be number of frames in the segmentation mask
         # that are non-empty, due to sparsity
-        num_frames = (self._sm_pixel_array.sum(axis=(1, 2)) > 0).sum()
+        num_frames = (self._sm_mask_array.sum(axis=(1, 2)) > 0).sum()
         assert instance.NumberOfFrames == num_frames
         assert len(instance.PerFrameFunctionalGroupsSequence) == num_frames
         frame_item = instance.PerFrameFunctionalGroupsSequence[0]
@@ -1365,11 +1377,41 @@ def test_construction_7(self):
         assert SegmentsOverlapValues[instance.SegmentsOverlap] == \
             SegmentsOverlapValues.NO
 
-    def test_pixel_types(self):
+    def test_construction_8(self):
+        instance = Segmentation(
+            [self._sm_grayscale],
+            self._sm_grayscale_mask_array,
+            SegmentationTypeValues.LABELED.value,
+            self._segment_descriptions,
+            self._series_instance_uid,
+            self._series_number,
+            self._sop_instance_uid,
+            self._instance_number,
+            self._manufacturer,
+            self._manufacturer_model_name,
+            self._software_versions,
+            self._device_serial_number
+        )
+        assert instance.Rows == self._sm_grayscale.pixel_array.shape[1]
+        assert instance.Columns == self._sm_grayscale.pixel_array.shape[2]
+        assert instance.TotalPixelMatrixRows == \
+            self._sm_grayscale.TotalPixelMatrixRows
+        assert instance.TotalPixelMatrixColumns == \
+            self._sm_grayscale.TotalPixelMatrixColumns
+
+        # Number of frames should be number of frames in the segmentation mask
+        # that are non-empty, due to sparsity
+        num_frames = (self._sm_grayscale_mask_array.sum(axis=(1, 2)) > 0).sum()
+        assert instance.NumberOfFrames == num_frames
+        assert len(instance.PerFrameFunctionalGroupsSequence) == num_frames
+        assert len(instance.SegmentSequence) == 1
+        self.check_dimension_index_vals(instance)
+
+    def test_pixel_types_binary_fractional(self):
         # A series of tests on different types of image
         tests = [
-            ([self._ct_image], self._ct_pixel_array),
-            ([self._sm_image], self._sm_pixel_array),
+            ([self._ct_image], self._ct_mask_array),
+            ([self._sm_color], self._sm_mask_array),
             (self._ct_series, self._ct_series_mask_array),
             ([self._ct_multiframe], self._ct_multiframe_mask_array),
         ]
@@ -1460,17 +1502,10 @@ def test_pixel_types(self):
                     transfer_syntax_uid=transfer_syntax_uid
                 )
 
-                # Ensure the recovered pixel array matches what is expected
-                if pix_type in (np.bool_, np.float_):
-                    assert np.array_equal(
-                        self.get_array_after_writing(instance),
-                        expected_encoding * max_fractional_value
-                    ), f'{sources[0].Modality} {transfer_syntax_uid}'
-                else:
-                    assert np.array_equal(
-                        self.get_array_after_writing(instance),
-                        expected_encoding
-                    ), f'{sources[0].Modality} {transfer_syntax_uid}'
+                assert np.array_equal(
+                    self.get_array_after_writing(instance),
+                    expected_encoding * max_fractional_value
+                ), f'{sources[0].Modality} {transfer_syntax_uid}'
                 self.check_dimension_index_vals(instance)
 
                 # Multi-segment (exclusive)
@@ -1685,6 +1720,156 @@ def test_pixel_types(self):
                 ), f'{sources[0].Modality} {transfer_syntax_uid}'
                 self.check_dimension_index_vals(instance)
 
+    def test_pixel_types_labeled(self):
+        # A series of tests on different types of image
+        tests = [
+            ([self._sm_grayscale], self._sm_grayscale_mask_array),
+        ]
+
+        for sources, mask in tests:
+
+            # Two segments, overlapping
+            multi_segment_overlap = np.stack([mask, mask], axis=-1)
+            if multi_segment_overlap.ndim == 3:
+                multi_segment_overlap = multi_segment_overlap[np.newaxis, ...]
+
+            # Two segments non-overlapping
+            additional_mask = np.zeros(mask.shape, mask.dtype)
+            additional_mask[mask == 0] = 1
+            multi_segment_exc = np.stack([mask, additional_mask], axis=-1)
+            if multi_segment_exc.ndim == 3:
+                multi_segment_exc = multi_segment_exc[np.newaxis, ...]
+
+            # Find the expected encodings for the masks
+            if mask.ndim > 2:
+                # Expected encoding of the mask
+                expected_encoding = self.sort_frames(
+                    sources,
+                    mask
+                )
+                expected_encoding = self.remove_empty_frames(
+                    expected_encoding
+                )
+
+                # Expected encoding of the complement
+                expected_encoding_comp = self.sort_frames(
+                    sources,
+                    additional_mask
+                )
+                expected_encoding_comp = self.remove_empty_frames(
+                    expected_encoding_comp
+                )
+
+                # Expected encoding of the multi segment arrays
+                expected_enc_overlap = np.concatenate(
+                    [expected_encoding, expected_encoding],
+                    axis=0
+                )
+                expected_enc_exc = np.concatenate(
+                    [expected_encoding, expected_encoding_comp],
+                    axis=0
+                )
+                expected_encoding = expected_encoding.squeeze()
+            else:
+                expected_encoding = mask
+                additional_expected_encoding = additional_mask
+
+                # Expected encoding of the multi segment arrays
+                expected_enc_overlap = np.stack(
+                    [expected_encoding, expected_encoding],
+                    axis=0
+                )
+                expected_enc_exc = np.stack(
+                    [expected_encoding, additional_expected_encoding],
+                    axis=0
+                )
+
+            # Test instance creation for different pixel types and transfer
+            # syntaxes
+            valid_transfer_syntaxes = [
+                ExplicitVRLittleEndian,
+                ImplicitVRLittleEndian,
+            ]
+
+            for transfer_syntax_uid in valid_transfer_syntaxes:
+                for pix_type in [np.uint8, np.uint16, np.uint32]:
+                    instance = Segmentation(
+                        sources,
+                        mask.astype(pix_type),
+                        SegmentationTypeValues.LABELED.value,
+                        self._segment_descriptions,
+                        self._series_instance_uid,
+                        self._series_number,
+                        self._sop_instance_uid,
+                        self._instance_number,
+                        self._manufacturer,
+                        self._manufacturer_model_name,
+                        self._software_versions,
+                        self._device_serial_number,
+                        transfer_syntax_uid=transfer_syntax_uid
+                    )
+
+                    # Ensure the recovered pixel array matches what is expected
+                    assert np.array_equal(
+                        self.get_array_after_writing(instance),
+                        expected_encoding.astype(pix_type)
+                    ), f'{sources[0].Modality} {transfer_syntax_uid}'
+                    self.check_dimension_index_vals(instance)
+
+                    # Multi-segment (exclusive)
+                    instance = Segmentation(
+                        sources,
+                        multi_segment_exc.astype(pix_type),
+                        SegmentationTypeValues.LABELED.value,
+                        self._both_segment_descriptions,
+                        self._series_instance_uid,
+                        self._series_number,
+                        self._sop_instance_uid,
+                        self._instance_number,
+                        self._manufacturer,
+                        self._manufacturer_model_name,
+                        self._software_versions,
+                        self._device_serial_number,
+                        transfer_syntax_uid=transfer_syntax_uid
+                    )
+                    assert (
+                        instance.SegmentsOverlap ==
+                        SegmentsOverlapValues.NO.value
+                    )
+
+                    assert np.array_equal(
+                        self.get_array_after_writing(instance),
+                        expected_enc_exc.astype(pix_type)
+                    ), f'{sources[0].Modality} {transfer_syntax_uid}'
+                    self.check_dimension_index_vals(instance)
+
+                    # Multi-segment (overlapping)
+                    instance = Segmentation(
+                        sources,
+                        multi_segment_overlap.astype(pix_type),
+                        SegmentationTypeValues.LABELED.value,
+                        self._both_segment_descriptions,
+                        self._series_instance_uid,
+                        self._series_number,
+                        self._sop_instance_uid,
+                        self._instance_number,
+                        self._manufacturer,
+                        self._manufacturer_model_name,
+                        self._software_versions,
+                        self._device_serial_number,
+                        transfer_syntax_uid=transfer_syntax_uid
+                    )
+                    assert (
+                        instance.SegmentsOverlap ==
+                        SegmentsOverlapValues.YES.value
+                    )
+
+                    assert np.array_equal(
+                        self.get_array_after_writing(instance),
+                        expected_enc_overlap.astype(pix_type)
+                    ), f'{sources[0].Modality} {transfer_syntax_uid}'
+                    self.check_dimension_index_vals(instance)
+
     def test_odd_number_pixels(self):
         # Test that an image with an odd number of pixels per frame is encoded
         # properly Including when additional segments are subsequently added
@@ -1826,7 +2011,7 @@ def test_construction_empty_source_image(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[],  # empty
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._segment_descriptions
@@ -1845,7 +2030,7 @@ def test_construction_invalid_content_label(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._segment_descriptions
@@ -1865,7 +2050,7 @@ def test_construction_mixed_source_series(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=self._ct_series + [self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._additional_segment_descriptions  # seg num 2
@@ -1884,7 +2069,7 @@ def test_construction_wrong_number_of_segments(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array[..., np.newaxis],
+                pixel_array=self._ct_mask_array[..., np.newaxis],
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._both_segment_descriptions
@@ -1928,7 +2113,7 @@ def test_construction_segment_numbers_start_wrong(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                    self._additional_segment_descriptions  # seg num 2
@@ -1948,7 +2133,7 @@ def test_construction_empty_invalid_floats(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],  # empty
-                pixel_array=self._ct_pixel_array.astype(np.float_) * 2,
+                pixel_array=self._ct_mask_array.astype(np.float_) * 2,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._segment_descriptions
@@ -1968,7 +2153,7 @@ def test_construction_empty_invalid_floats_binary(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array.astype(np.float_) * 0.5,
+                pixel_array=self._ct_mask_array.astype(np.float_) * 0.5,
                 segmentation_type=SegmentationTypeValues.BINARY.value,
                 segment_descriptions=(
                     self._segment_descriptions
@@ -1988,7 +2173,7 @@ def test_construction_empty_invalid_dtype(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array.astype(np.int16),
+                pixel_array=self._ct_mask_array.astype(np.int16),
                 segmentation_type=SegmentationTypeValues.BINARY.value,
                 segment_descriptions=(
                     self._segment_descriptions
@@ -2007,7 +2192,7 @@ def test_construction_wrong_segment_order(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._additional_segment_descriptions +  # seg 2
@@ -2027,7 +2212,7 @@ def test_construction_duplicate_segment_number(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=(
                     self._segment_descriptions +
@@ -2047,8 +2232,8 @@ def test_construction_non_described_segment(self):
         with pytest.raises(ValueError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=(self._ct_pixel_array * 3).astype(np.uint8),
-                segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
+                pixel_array=(self._ct_mask_array * 3).astype(np.uint8),
+                segmentation_type=SegmentationTypeValues.BINARY.value,
                 segment_descriptions=(
                     self._segment_descriptions +
                     self._additional_segment_descriptions
@@ -2140,7 +2325,7 @@ def test_construction_plane_orientation_no_frame_of_reference(self):
     def test_construction_missing_required_attribute(self):
         with pytest.raises(TypeError):
             Segmentation(
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2173,7 +2358,7 @@ def test_construction_missing_required_attribute_3(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 series_instance_uid=self._series_instance_uid,
                 series_number=self._series_number,
@@ -2189,7 +2374,7 @@ def test_construction_missing_required_attribute_4(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_number=self._series_number,
@@ -2205,7 +2390,7 @@ def test_construction_missing_required_attribute_5(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2221,7 +2406,7 @@ def test_construction_missing_required_attribute_6(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2237,7 +2422,7 @@ def test_construction_missing_required_attribute_7(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2253,7 +2438,7 @@ def test_construction_missing_required_attribute_8(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2269,7 +2454,7 @@ def test_construction_missing_required_attribute_9(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2285,7 +2470,7 @@ def test_construction_missing_required_attribute_10(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2301,7 +2486,7 @@ def test_construction_missing_required_attribute_11(self):
         with pytest.raises(TypeError):
             Segmentation(
                 source_images=[self._ct_image],
-                pixel_array=self._ct_pixel_array,
+                pixel_array=self._ct_mask_array,
                 segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
                 segment_descriptions=self._segment_descriptions,
                 series_instance_uid=self._series_instance_uid,
@@ -2321,7 +2506,7 @@ def test_construction_optional_arguments(self):
         series_description = 'My First Segmentation'
         instance = Segmentation(
             source_images=[self._ct_image],
-            pixel_array=self._ct_pixel_array,
+            pixel_array=self._ct_mask_array,
             segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
             segment_descriptions=self._segment_descriptions,
             series_instance_uid=self._series_instance_uid,
@@ -2366,7 +2551,7 @@ def test_construction_optional_arguments_2(self):
         ]
         instance = Segmentation(
             source_images=[self._ct_image],
-            pixel_array=self._ct_pixel_array,
+            pixel_array=self._ct_mask_array,
             segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
             segment_descriptions=self._segment_descriptions,
             series_instance_uid=self._series_instance_uid,
@@ -2409,11 +2594,11 @@ def test_spatial_positions_not_preserved(self):
                 image_position=(i * 1.0, i * 1.0, 1.0),
                 pixel_matrix_position=(i * 1, i * 1)
             )
-            for i in range(self._sm_image.pixel_array.shape[0])
+            for i in range(self._sm_color.pixel_array.shape[0])
         ]
         instance = Segmentation(
-            source_images=[self._sm_image],
-            pixel_array=self._sm_pixel_array,
+            source_images=[self._sm_color],
+            pixel_array=self._sm_mask_array,
             segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
             segment_descriptions=self._segment_descriptions,
             series_instance_uid=self._series_instance_uid,
@@ -2448,6 +2633,13 @@ def setUp(self):
             self._sm_control_seg_ds
         )
 
+        self._sm_labeled_seg_ds = dcmread(
+            'data/test_files/seg_image_grayscale_labeled.dcm'
+        )
+        self._sm_labeled_seg = Segmentation.from_dataset(
+            self._sm_labeled_seg_ds
+        )
+
         self._ct_binary_seg_ds = dcmread(
             'data/test_files/seg_image_ct_binary.dcm'
         )
@@ -2513,6 +2705,12 @@ def test_properties(self):
         seg_property = self._sm_control_seg.segmented_property_types[0]
         assert seg_property == codes.SCT.ConnectiveTissue
 
+        seg_type = self._sm_labeled_seg.segmentation_type
+        assert seg_type == SegmentationTypeValues.LABELED
+        assert self._sm_labeled_seg.segmentation_fractional_type is None
+        assert self._sm_labeled_seg.number_of_segments == 1
+        assert self._sm_labeled_seg.segment_numbers == range(1, 2)
+
         # CT segs
         for seg in self._ct_segs:
             seg_type = seg.segmentation_type
@@ -3105,7 +3303,7 @@ def setUp(self):
         self._ct_image = dcmread(
             str(data_dir.joinpath('test_files', 'ct_image.dcm'))
         )
-        self._sm_image = dcmread(
+        self._sm_color = dcmread(
             str(data_dir.joinpath('test_files', 'sm_image.dcm'))
         )
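End-to-end usage sketch for reviewers, assembled from the docs/usage.rst example and the new tests above. The source file path, the metadata strings, and the single segment description are placeholders rather than part of the patch; the sketch assumes a multi-frame grayscale slide microscopy image as input.

    import numpy as np
    import highdicom as hd
    from pydicom import dcmread
    from pydicom.sr.codedict import codes
    from pydicom.uid import ExplicitVRLittleEndian

    # Placeholder grayscale slide microscopy source image
    image = dcmread('sm_image_grayscale.dcm')

    # Label-map mask: 0 is background, each positive integer is a segment number
    mask = np.zeros(image.pixel_array.shape, dtype=np.uint16)
    mask[0, 1:5, 7:9] = 1

    # One description per label present in the mask (placeholder codes)
    description = hd.seg.SegmentDescription(
        segment_number=1,
        segment_label='first segment',
        segmented_property_category=codes.SCT.Tissue,
        segmented_property_type=codes.SCT.ConnectiveTissue,
        algorithm_type=hd.seg.SegmentAlgorithmTypeValues.MANUAL,
    )

    seg = hd.seg.Segmentation(
        source_images=[image],
        pixel_array=mask,
        segmentation_type=hd.seg.SegmentationTypeValues.LABELED,
        segment_descriptions=[description],
        series_instance_uid=hd.UID(),
        series_number=2,
        sop_instance_uid=hd.UID(),
        instance_number=1,
        manufacturer='Manufacturer',
        manufacturer_model_name='Model',
        software_versions='v1',
        device_serial_number='Device XYZ',
        transfer_syntax_uid=ExplicitVRLittleEndian,
    )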