
API Reference

Structure:

Lime

ImageSpatialAttributes

Bases: ImageAttributes

Source code in src/meteors/lime.py
class ImageSpatialAttributes(ImageAttributes):
    segmentation_mask: Annotated[
        np.ndarray | torch.Tensor,
        Field(
            kw_only=False,
            validate_default=True,
            description="Segmentation mask used for the explanation.",
        ),
    ]

    _flattened_segmentation_mask: torch.Tensor | None = None

    @model_validator(mode="after")
    def validate_segmentation_mask(self) -> Self:
        if isinstance(self.segmentation_mask, np.ndarray):
            self.segmentation_mask = torch.tensor(self.segmentation_mask, device=self._device)

        if self.segmentation_mask.device != self._device:
            self.segmentation_mask = self.segmentation_mask.to(self._device)  # move to the device

        return self

    def to(self, device: torch.device) -> Self:
        super().to(device)
        self.segmentation_mask = self.segmentation_mask.to(device)
        return self

    def get_flattened_segmentation_mask(self) -> torch.Tensor:
        """The segmentation mask repeats the same values along the band axis, so it is effectively two-dimensional; this function returns that two-dimensional tensor."""
        if self._flattened_segmentation_mask is None:
            self._flattened_segmentation_mask = self.segmentation_mask.select(dim=self.image.band_axis, index=0)
        return self._flattened_segmentation_mask

    def get_flattened_attributes(self) -> torch.Tensor:
        """The spatial attributions repeat the same values along the band axis, so they are effectively two-dimensional; this function returns that two-dimensional tensor."""
        if self._flattened_attributes is None:
            self._flattened_attributes = self.attributes.select(dim=self.image.band_axis, index=0)
        return self._flattened_attributes

get_flattened_attributes()

The spatial attributions repeat the same values along the band axis, so they are effectively two-dimensional; this function returns that two-dimensional tensor.

Source code in src/meteors/lime.py
def get_flattened_attributes(self) -> torch.Tensor:
    """The spatial attributions repeat the same values along the band axis, so they are effectively two-dimensional; this function returns that two-dimensional tensor."""
    if self._flattened_attributes is None:
        self._flattened_attributes = self.attributes.select(dim=self.image.band_axis, index=0)
    return self._flattened_attributes

get_flattened_segmentation_mask()

The segmentation mask repeats the same values along the band axis, so it is effectively two-dimensional; this function returns that two-dimensional tensor.

Source code in src/meteors/lime.py
def get_flattened_segmentation_mask(self) -> torch.Tensor:
    """The segmentation mask repeats the same values along the band axis, so it is effectively two-dimensional; this function returns that two-dimensional tensor."""
    if self._flattened_segmentation_mask is None:
        self._flattened_segmentation_mask = self.segmentation_mask.select(dim=self.image.band_axis, index=0)
    return self._flattened_segmentation_mask
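
A minimal usage sketch for the two getters, assuming a `spatial_attribution` object returned by `Lime.get_spatial_attributes` (documented below); the `matplotlib` plotting is illustrative and not part of this API:

import matplotlib.pyplot as plt

# `spatial_attribution` is a hypothetical ImageSpatialAttributes instance,
# e.g. the return value of Lime.get_spatial_attributes.
# Both getters drop the band axis (along which values repeat) and cache the
# result on first call, so repeated calls are cheap.
mask_2d = spatial_attribution.get_flattened_segmentation_mask()  # (H, W) for a (bands, H, W) image
attrs_2d = spatial_attribution.get_flattened_attributes()        # (H, W) for a (bands, H, W) image

fig, (ax_mask, ax_attrs) = plt.subplots(1, 2)
ax_mask.imshow(mask_2d.cpu().numpy())
ax_mask.set_title("segmentation mask")
ax_attrs.imshow(attrs_2d.cpu().numpy())
ax_attrs.set_title("LIME attributions")
plt.show()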

ImageSpectralAttributes

Bases: ImageAttributes

Source code in src/meteors/lime.py
class ImageSpectralAttributes(ImageAttributes):
    band_mask: Annotated[
        np.ndarray | torch.Tensor,
        Field(
            kw_only=False,
            validate_default=True,
            description="Band mask used for the explanation.",
        ),
    ]
    band_names: Annotated[
        dict[str, int],
        Field(
            kw_only=False,
            validate_default=True,
            description="Dictionary that translates the band names into the segment values.",
        ),
    ]

    _flattened_band_mask: torch.Tensor | None = None

    @model_validator(mode="after")
    def validate_band_mask(self) -> Self:
        if isinstance(self.band_mask, np.ndarray):
            self.band_mask = torch.tensor(self.band_mask, device=self._device)

        if self.band_mask.device != self._device:
            self.band_mask = self.band_mask.to(self._device)

        if 0 not in self.band_names.values() and 0 in torch.unique(self.band_mask):
            self.band_names["not_included"] = 0

        return self

    def to(self, device: torch.device) -> Self:
        super().to(device)
        self.band_mask = self.band_mask.to(device)
        return self

    def get_flattened_band_mask(self) -> torch.Tensor:
        """The band mask repeats the same values across the spatial axes, so it is effectively one-dimensional; this function returns that one-dimensional tensor."""
        if self._flattened_band_mask is None:
            dims_to_select = [2, 1, 0]
            dims_to_select.remove(self.image.band_axis)
            self._flattened_band_mask = self.band_mask.select(dim=dims_to_select[0], index=0).select(
                dim=dims_to_select[1], index=0
            )
        return self._flattened_band_mask

    def get_flattened_attributes(self) -> torch.Tensor:
        """The spectral attributions repeat the same values across the spatial axes, so they are effectively one-dimensional; this function returns that one-dimensional tensor."""
        if self._flattened_attributes is None:
            dims_to_select = [2, 1, 0]
            dims_to_select.remove(self.image.band_axis)
            self._flattened_attributes = self.attributes.select(dim=dims_to_select[0], index=0).select(
                dim=dims_to_select[1], index=0
            )
        return self._flattened_attributes

get_flattened_attributes()

The spectral attributions repeat the same values across the spatial axes, so they are effectively one-dimensional; this function returns that one-dimensional tensor.

Source code in src/meteors/lime.py
def get_flattened_attributes(self) -> torch.Tensor:
    """The spectral attributions repeat the same values across the spatial axes, so they are effectively one-dimensional; this function returns that one-dimensional tensor."""
    if self._flattened_attributes is None:
        dims_to_select = [2, 1, 0]
        dims_to_select.remove(self.image.band_axis)
        self._flattened_attributes = self.attributes.select(dim=dims_to_select[0], index=0).select(
            dim=dims_to_select[1], index=0
        )
    return self._flattened_attributes

get_flattened_band_mask()

The band mask repeats the same values across the spatial axes, so it is effectively one-dimensional; this function returns that one-dimensional tensor.

Source code in src/meteors/lime.py
def get_flattened_band_mask(self) -> torch.Tensor:
    """The band mask repeats the same values across the spatial axes, so it is effectively one-dimensional; this function returns that one-dimensional tensor."""
    if self._flattened_band_mask is None:
        dims_to_select = [2, 1, 0]
        dims_to_select.remove(self.image.band_axis)
        self._flattened_band_mask = self.band_mask.select(dim=dims_to_select[0], index=0).select(
            dim=dims_to_select[1], index=0
        )
    return self._flattened_band_mask
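
A sketch of reading per-segment scores from the flattened views, assuming a `spectral_attribution` object returned by `Lime.get_spectral_attributes` (documented below):

# `spectral_attribution` is a hypothetical ImageSpectralAttributes instance,
# e.g. the return value of Lime.get_spectral_attributes.
band_mask_1d = spectral_attribution.get_flattened_band_mask()  # shape (num_bands,)
attrs_1d = spectral_attribution.get_flattened_attributes()     # shape (num_bands,)

# band_names maps each segment label to its value in the band mask, so the
# mean attribution of every segment can be read off directly.
for name, value in spectral_attribution.band_names.items():
    segment_attrs = attrs_1d[band_mask_1d == value]
    if segment_attrs.numel() > 0:
        print(f"{name}: {segment_attrs.mean().item():.4f}")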

Lime

Bases: Explainer

Source code in src/meteors/lime.py
class Lime(Explainer):
    # should it be any different than base lime?
    explainable_model: ExplainableModel
    interpretable_model: InterpretableModel
    similarity_func: Callable | None = None
    perturb_func: Callable | None = None

    _lime = None

    @model_validator(mode="after")
    def construct_lime(self) -> Self:
        self._lime = LimeBase(
            forward_func=self.explainable_model.forward_func,
            interpretable_model=self.interpretable_model,
            similarity_func=self.similarity_func,
            perturb_func=self.perturb_func,
        )

        return self

    def to(self, device: torch.device) -> Self:
        super().to(device)
        # self.interpretable_model.to(device)
        return self

    @staticmethod
    def get_segmentation_mask(
        image: Image,
        segmentation_method: Literal["patch", "slic"] = "slic",
        segmentation_method_params: dict | None = None,
    ) -> torch.Tensor:
        segmentation_method_params = segmentation_method_params or {}
        if segmentation_method == "slic":
            return Lime.__get_slick_segmentation_mask(image, **segmentation_method_params)
        if segmentation_method == "patch":
            return Lime.__get_patch_segmentation_mask(image, **segmentation_method_params)
        raise NotImplementedError("Only slic and patch methods are supported for now")

    @staticmethod
    def get_band_mask(
        image: Image,
        band_names: list[str | Sequence[str]] | dict[tuple[str, ...] | str, int],
    ) -> tuple[torch.Tensor, dict[tuple[str, ...] | str, int]]:
        """function generates band mask - an array that corresponds to the image, which values are different segments.
        Args:
            image (Image): A Hyperspectral image
            band_names ((List[str | List[str]]) | (Dict[str | List[str], Iterable[int]])): list of band names that should be treated as one segment or dictionary containing

        Returns:
            Tuple[torch.Tensor, Dict[str, int]]: a tuple which consists of an actual band mask and a dictionary that translates the band names into the segment values
        """

        if isinstance(band_names, dict):
            band_names_dict = {tuple(k) if not isinstance(k, str) else k: v for k, v in band_names.items()}
        else:
            band_names_dict = Lime.__get_band_dict_from_list(band_names)  # type: ignore

        band_names_simplified = {
            str(segment[0]) if isinstance(segment, tuple) and len(segment) == 1 else segment: value
            for segment, value in band_names_dict.items()
        }

        return (
            Lime.__get_band_mask_from_names_dict(image, band_names_dict),  # type: ignore
            band_names_simplified,  # type: ignore
        )

    @staticmethod
    def __get_band_dict_from_list(
        band_names_list: list[str | Sequence[str]],
    ) -> dict[tuple[str, ...], int]:
        band_names_dict = {}
        for idx, segment in enumerate(band_names_list):
            if isinstance(segment, str):
                segment = [segment]
            segment = tuple(segment)

            band_names_dict[segment] = idx + 1
        return band_names_dict

    @staticmethod
    def __get_band_mask_from_names_dict(image: Image, band_names: dict[tuple[str, ...] | str, int]) -> torch.Tensor:
        grouped_band_names = Lime.__get_grouped_band_names(band_names)

        device = image.image.device
        resolution_segments = Lime.__get_resolution_segments(image.wavelengths, grouped_band_names, device=device)

        axis = [0, 1, 2]
        axis.remove(image.band_axis)

        band_mask = resolution_segments.unsqueeze(axis[0]).unsqueeze(axis[1])
        size_image = image.image.size()
        size_mask = band_mask.size()

        repeat_dims = [s2 // s1 for s1, s2 in zip(size_mask, size_image)]
        band_mask = band_mask.repeat(repeat_dims)

        return band_mask

    @staticmethod
    def __get_grouped_band_names(
        band_names: dict[tuple[str, ...] | str, int],
    ) -> dict[tuple[str, ...], int]:
        # function extracts band names or indices based on the spyndex library
        # also checks if the given names are valid

        grouped_band_names = {}

        for segment in band_names.keys():
            band_names_segment: list[str] = []
            if isinstance(segment, str):
                segment = tuple([segment])

            for band_name in segment:
                if band_name in spyndex.indices:
                    band_names_segment.extend(spyndex.indices[band_name].bands)
                elif band_name in spyndex.bands:
                    band_names_segment.append(band_name)
                else:
                    raise ValueError(
                        f"Invalid band name {band_name}, band name must be either in `spyndex.indices` or `spyndex.bands`"
                    )

            grouped_band_names[tuple(band_names_segment)] = band_names[segment]

        return grouped_band_names

    @staticmethod
    def __get_resolution_segments(
        wavelengths: np.ndarray,
        band_names: dict[tuple[str, ...], int],
        device="cpu",
    ) -> torch.Tensor:
        resolution_segments = torch.zeros(len(wavelengths), dtype=torch.int64, device=device)

        segments = list(band_names.keys())
        for segment in segments[::-1]:
            for band_name in segment:
                min_wavelength = spyndex.bands[band_name].min_wavelength
                max_wavelength = spyndex.bands[band_name].max_wavelength

                for wave_idx, wave_val in enumerate(wavelengths):
                    if min_wavelength <= wave_val <= max_wavelength:
                        resolution_segments[wave_idx] = band_names[segment]

        unique_segments = torch.unique(resolution_segments)
        for segment in band_names.keys():
            if band_names[segment] not in unique_segments:
                print(f"bands {segment} not found in the wavelengths or bands are overlapping")
        return resolution_segments

    def get_spatial_attributes(
        self,
        image: Image,
        segmentation_mask: np.ndarray | torch.Tensor | None = None,
        target=None,
        segmentation_method: Literal["slic", "patch"] = "slic",
        segmentation_method_params: dict | None = None,
    ) -> ImageSpatialAttributes:
        assert self._lime is not None, "Lime object not initialized"

        assert self.explainable_model.problem_type == "regression", "For now only the regression problem is supported"

        if segmentation_mask is None:
            segmentation_mask = self.get_segmentation_mask(image, segmentation_method, segmentation_method_params)

        if isinstance(image.image, np.ndarray):
            image.image = torch.tensor(image.image, device=self._device)
        elif isinstance(image.image, torch.Tensor):
            image.image = image.image.to(self._device)

        if isinstance(image.binary_mask, np.ndarray):
            image.binary_mask = torch.tensor(image.binary_mask, device=self._device)
        elif isinstance(image.binary_mask, torch.Tensor):
            image.binary_mask = image.binary_mask.to(self._device)

        if isinstance(segmentation_mask, np.ndarray):
            segmentation_mask = torch.tensor(segmentation_mask, device=self._device)
        elif isinstance(segmentation_mask, torch.Tensor):
            segmentation_mask = segmentation_mask.to(self._device)

        assert (
            segmentation_mask.device == self._device
        ), f"Segmentation mask should be on the same device as explainable model {self._device}"
        assert (
            image.image.device == self._device
        ), f"Image data should be on the same device as explainable model {self._device}"

        assert isinstance(self._lime, LimeBase), "Lime object not initialized"

        lime_attributes, score = self._lime.attribute(
            inputs=image.image.unsqueeze(0),
            target=target,
            feature_mask=segmentation_mask.unsqueeze(0),
            n_samples=10,
            perturbations_per_eval=4,
            show_progress=True,
            return_input_shape=True,
        )

        spatial_attribution = ImageSpatialAttributes(
            image=image,
            attributes=lime_attributes[0],
            segmentation_mask=segmentation_mask,
            score=score,
        )

        return spatial_attribution

    def get_spectral_attributes(
        self,
        image: Image,
        band_mask: np.ndarray | torch.Tensor | None = None,
        target=None,
        band_names: list[str] | dict[str | tuple[str, ...], int] | None = None,
        verbose=False,
    ) -> ImageSpectralAttributes:
        assert self._lime is not None, "Lime object not initialized"

        assert self.explainable_model.problem_type == "regression", "For now only the regression problem is supported"

        if isinstance(image.image, np.ndarray):
            image.image = torch.tensor(image.image, device=self._device)
        elif isinstance(image.image, torch.Tensor):
            image.image = image.image.to(self._device)

        if isinstance(image.binary_mask, np.ndarray):
            image.binary_mask = torch.tensor(image.binary_mask, device=self._device)
        elif isinstance(image.binary_mask, torch.Tensor):
            image.binary_mask = image.binary_mask.to(self._device)

        assert (
            image.image.device == self._device
        ), f"Image data should be on the same device as explainable model {self._device}"

        if band_mask is None:
            band_mask, band_names = self.get_band_mask(image, band_names)  # type: ignore

        # convert the band mask to a tensor on the correct device before it is inspected
        if isinstance(band_mask, np.ndarray):
            band_mask = torch.tensor(band_mask, device=self._device)
        else:
            band_mask = band_mask.to(self._device)

        if band_names is None:
            unique_segments = torch.unique(band_mask)
            band_names = {str(int(segment)): idx for idx, segment in enumerate(unique_segments)}
        # if band names were provided explicitly, their consistency with the band mask is not verified here

        assert (
            band_mask.device == self._device
        ), f"Band mask should be on the same device as explainable model {self._device}"

        lime_attributes, score = self._lime.attribute(
            inputs=image.image.unsqueeze(0),
            target=target,
            feature_mask=band_mask.unsqueeze(0),
            n_samples=10,
            perturbations_per_eval=4,
            show_progress=verbose,
            return_input_shape=True,
        )

        lime_attributes = lime_attributes[0]

        spectral_attribution = ImageSpectralAttributes(
            image=image,
            attributes=lime_attributes,
            band_mask=band_mask,
            band_names=band_names,
            score=score,
        )

        return spectral_attribution

    @staticmethod
    def __get_slick_segmentation_mask(image: Image, num_interpret_features: int, *args, **kwargs) -> torch.Tensor:
        device = image.image.device
        numpy_image = np.array(image.image.to("cpu"))
        segmentation_mask = slic(
            numpy_image,
            n_segments=num_interpret_features,
            mask=np.array(image.get_flattened_binary_mask().to("cpu")),
            channel_axis=image.band_axis,
            *args,
            **kwargs,
        )

        if np.min(segmentation_mask) == 1:
            segmentation_mask -= 1

        # segmentation_mask = np.repeat(np.expand_dims(segmentation_mask, axis=image.band_axis), repeats=image.image.shape[image.band_axis], axis=image.band_axis)
        segmentation_mask = torch.tensor(segmentation_mask, dtype=torch.int64, device=device)
        segmentation_mask = torch.unsqueeze(segmentation_mask, dim=image.band_axis)
        # segmentation_mask = torch.repeat_interleave(torch.unsqueeze(segmentation_mask, dim=image.band_axis), repeats=image.image.shape[image.band_axis], dim=image.band_axis)
        return segmentation_mask

    @staticmethod
    def __get_patch_segmentation_mask(image: Image, patch_size=10, *args, **kwargs) -> torch.Tensor:
        print("Patch segmentation only works for band_index = 0 now")

        device = image.image.device
        if image.image.shape[1] % patch_size != 0 or image.image.shape[2] % patch_size != 0:
            raise ValueError("Invalid patch_size. patch_size must be a factor of both width and height of the image")

        height, width = image.image.shape[1], image.image.shape[2]

        mask_zero = image.image.bool()[0]  # binary mask of the first band, already on the right device
        idx_mask = torch.arange(height // patch_size * width // patch_size, device=device).reshape(
            height // patch_size, width // patch_size
        )
        idx_mask += 1
        segmentation_mask = torch.repeat_interleave(idx_mask, patch_size, dim=0)
        segmentation_mask = torch.repeat_interleave(segmentation_mask, patch_size, dim=1)
        segmentation_mask = segmentation_mask * mask_zero
        # segmentation_mask = torch.repeat_interleave(torch.unsqueeze(segmentation_mask, dim=image.band_axis), repeats=image.image.shape[image.band_axis], dim=image.band_axis)
        segmentation_mask = torch.unsqueeze(segmentation_mask, dim=image.band_axis)

        mask_idx = np.unique(segmentation_mask).tolist()
        for idx, mask_val in enumerate(mask_idx):
            segmentation_mask[segmentation_mask == mask_val] = idx

        return segmentation_mask
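
A minimal end-to-end sketch of the spatial workflow. The model, the interpretable surrogate, and the `Image` construction are placeholders (their exact types and import paths are assumptions); only the `Lime` calls mirror the source above. Note that `get_spatial_attributes` currently asserts a regression `problem_type`:

from meteors.lime import Lime

# Hypothetical setup: `image` is a meteors Image with shape (bands, H, W),
# `my_explainable_model` is an ExplainableModel with problem_type="regression",
# and `my_interpretable_model` is a sparse linear surrogate model.
lime = Lime(
    explainable_model=my_explainable_model,
    interpretable_model=my_interpretable_model,
)

# Build a SLIC segmentation mask; num_interpret_features sets the number of
# superpixels (see __get_slick_segmentation_mask above).
segmentation_mask = Lime.get_segmentation_mask(
    image,
    segmentation_method="slic",
    segmentation_method_params={"num_interpret_features": 10},
)

spatial_attribution = lime.get_spatial_attributes(
    image,
    segmentation_mask=segmentation_mask,
)
print(spatial_attribution.score)  # goodness-of-fit score of the interpretable surrogate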

get_band_mask(image, band_names) staticmethod

Generates a band mask: an array of the same shape as the image whose values identify the different segments.

Parameters:

Name Type Description
image Image A hyperspectral image.
band_names list[str | Sequence[str]] | dict[tuple[str, ...] | str, int] A list of band names, where each entry (a single name or a group of names) is treated as one segment, or a dictionary mapping band names (or tuples of names) to segment values.

Returns:

Type Description
tuple[Tensor, dict[tuple[str, ...] | str, int]]

A tuple which consists of the band mask itself and a dictionary that translates the band names into the segment values.

Source code in src/meteors/lime.py
@staticmethod
def get_band_mask(
    image: Image,
    band_names: list[str | Sequence[str]] | dict[tuple[str, ...] | str, int],
) -> tuple[torch.Tensor, dict[tuple[str, ...] | str, int]]:
    """function generates band mask - an array that corresponds to the image, which values are different segments.
    Args:
        image (Image): A Hyperspectral image
        band_names ((List[str | List[str]]) | (Dict[str | List[str], Iterable[int]])): list of band names that should be treated as one segment or dictionary containing

    Returns:
        Tuple[torch.Tensor, Dict[str, int]]: a tuple which consists of an actual band mask and a dictionary that translates the band names into the segment values
    """

    if isinstance(band_names, dict):
        band_names_dict = {tuple(k) if not isinstance(k, str) else k: v for k, v in band_names.items()}
    else:
        band_names_dict = Lime.__get_band_dict_from_list(band_names)  # type: ignore

    band_names_simplified = {
        str(segment[0]) if isinstance(segment, tuple) and len(segment) == 1 else segment: value
        for segment, value in band_names_dict.items()
    }

    return (
        Lime.__get_band_mask_from_names_dict(image, band_names_dict),  # type: ignore
        band_names_simplified,  # type: ignore
    )
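
A usage sketch for the two accepted `band_names` forms. The band codes ("R", "G", "B") are assumed to be present in `spyndex.bands`, and `image` and `lime` are the same placeholders as in the earlier sketch:

# List form: each entry becomes one segment, numbered from 1; wavelengths not
# covered by any named band keep the value 0 (labelled "not_included" later by
# ImageSpectralAttributes).
band_mask, band_names = Lime.get_band_mask(image, ["R", "G", "B"])
# band_names == {"R": 1, "G": 2, "B": 3}

# Dict form: groups of bands share a segment, with explicit segment values.
band_mask, band_names = Lime.get_band_mask(image, {("R", "G"): 1, "B": 2})

spectral_attribution = lime.get_spectral_attributes(
    image,
    band_mask=band_mask,
    band_names=band_names,
)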