diff --git a/.jenkins/check/config/filter_pylint.txt b/.jenkins/check/config/filter_pylint.txt index fd77f3142b0..d37c8247cf8 100644 --- a/.jenkins/check/config/filter_pylint.txt +++ b/.jenkins/check/config/filter_pylint.txt @@ -34,7 +34,6 @@ "mindspore/mindspore/train/serialization.py" "protected-access" "mindspore/mindspore/train/model.py" "protected-access" "mindspore/mindspore/log.py" "protected-access" -"mindspore/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py" "unsupported-assignment-operation" "mindspore/model_zoo/official/cv" "missing-docstring" "mindspore/model_zoo/official/cv" "c-extension-no-member" "mindspore/model_zoo/official/nlp/bert_thor/src/bert_model.py" "redefined-outer-name" @@ -117,8 +116,4 @@ "mindspore/tests/st/ops/ascend/test_aicpu_ops/test_strided_slice.py" "redefined-builtin" "mindspore/tests/st/ops/ascend/test_aicpu_ops/test_strided_slice_grad.py" "redefined-outer-name" "mindspore/tests/st/pynative/parser/test_parser_construct.py" "bad-super-call" -"mindspore/tests/st/explainer/benchmark/_attribution/test_localization.py" "protected-access" -"mindspore/tests/st/explainer/explanation/_attribution/_backprop/test_gradcam.py" "not-callable" -"mindspore/tests/st/explainer/explanation/_attribution/_backprop/test_gradient.py" "not-callable" -"mindspore/tests/st/explainer/explanation/_attribution/_backprop/test_modified_relu.py" "not-callable" "mindspore/tests/ut/python/optimizer/test_auto_grad.py" "broad-except" diff --git a/cmake/package.cmake b/cmake/package.cmake index bf1435b576b..5f8c4c925ce 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -309,7 +309,6 @@ install( ${CMAKE_SOURCE_DIR}/mindspore/ops ${CMAKE_SOURCE_DIR}/mindspore/communication ${CMAKE_SOURCE_DIR}/mindspore/profiler - ${CMAKE_SOURCE_DIR}/mindspore/explainer ${CMAKE_SOURCE_DIR}/mindspore/compression ${CMAKE_SOURCE_DIR}/mindspore/run_check DESTINATION ${INSTALL_PY_DIR} diff --git a/cmake/package_win.cmake b/cmake/package_win.cmake index e592b22ae42..cbc57a37756 100644 --- a/cmake/package_win.cmake +++ b/cmake/package_win.cmake @@ -195,7 +195,6 @@ install( ${CMAKE_SOURCE_DIR}/mindspore/ops ${CMAKE_SOURCE_DIR}/mindspore/communication ${CMAKE_SOURCE_DIR}/mindspore/profiler - ${CMAKE_SOURCE_DIR}/mindspore/explainer ${CMAKE_SOURCE_DIR}/mindspore/compression ${CMAKE_SOURCE_DIR}/mindspore/run_check DESTINATION ${INSTALL_PY_DIR} diff --git a/mindspore/explainer/OWNERS b/mindspore/explainer/OWNERS deleted file mode 100644 index 53955dac166..00000000000 --- a/mindspore/explainer/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -approvers: -- ouwenchang -- wangyue01 -- wenkai_dist -- lilongfei15 -- lixiaohui33 diff --git a/mindspore/explainer/__init__.py b/mindspore/explainer/__init__.py deleted file mode 100644 index 560f56857a0..00000000000 --- a/mindspore/explainer/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""Provides explanation runner high-level APIs.""" - -from ._image_classification_runner import ImageClassificationRunner - -__all__ = ['ImageClassificationRunner'] diff --git a/mindspore/explainer/_image_classification_runner.py b/mindspore/explainer/_image_classification_runner.py deleted file mode 100644 index 9be4fe97c0e..00000000000 --- a/mindspore/explainer/_image_classification_runner.py +++ /dev/null @@ -1,1074 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Image Classification Runner.""" -import os -import re -import json -from time import time - -import numpy as np -from scipy.stats import beta -from PIL import Image - -import mindspore as ms -from mindspore import context -from mindspore import log -import mindspore.dataset as ds -from mindspore.dataset import Dataset -from mindspore.nn import Cell, SequentialCell -from mindspore.ops.operations import ExpandDims -from mindspore.train._utils import check_value_type -from mindspore.train.summary._summary_adapter import _convert_image_format -from mindspore.train.summary.summary_record import SummaryRecord -from mindspore.train.summary_pb2 import Explain -from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation -from mindspore.explainer.benchmark import Localization -from mindspore.explainer.benchmark._attribution.metric import AttributionMetric -from mindspore.explainer.benchmark._attribution.metric import LabelSensitiveMetric -from mindspore.explainer.benchmark._attribution.metric import LabelAgnosticMetric -from mindspore.explainer.explanation import RISE -from mindspore.explainer.explanation._attribution.attribution import Attribution -from mindspore.explainer.explanation._counterfactual import hierarchical_occlusion as hoc -from mindspore.explainer._utils import deprecated_error - - -_EXPAND_DIMS = ExpandDims() - - -def _normalize(img_np): - """Normalize the numpy image to the range of [0, 1]. """ - max_ = img_np.max() - min_ = img_np.min() - normed = (img_np - min_) / (max_ - min_).clip(min=1e-10) - return normed - - -def _np_to_image(img_np, mode): - """Convert numpy array to PIL image.""" - return Image.fromarray(np.uint8(img_np * 255), mode=mode) - - -class _Verifier: - """Verification of dataset and settings of ImageClassificationRunner.""" - ALL = 0xFFFFFFFF - REGISTRATION = 1 - DATA_N_NETWORK = 1 << 1 - SALIENCY = 1 << 2 - HOC = 1 << 3 - ENVIRONMENT = 1 << 4 - - def _verify(self, flags): - """ - Verify datasets and settings. - - Args: - flags (int): Verification flags, use bitwise or '|' to combine multiple flags. - Possible bitwise flags are shown as follow. - - - ALL: Verify everything. - - REGISTRATION: Verify explainer module registration. - - DATA_N_NETWORK: Verify dataset and network. - - SALIENCY: Verify saliency related settings. 
- - HOC: Verify HOC related settings. - - ENVIRONMENT: Verify the runtime environment. - - Raises: - ValueError: Be raised for any data or settings' value problem. - TypeError: Be raised for any data or settings' type problem. - RuntimeError: Be raised for any runtime problem. - """ - if flags & self.ENVIRONMENT: - device_target = context.get_context('device_target') - if device_target not in ("Ascend", "GPU"): - raise RuntimeError(f"Unsupported device_target: '{device_target}', " - f"only 'Ascend' or 'GPU' is supported. " - f"Please call context.set_context(device_target='Ascend') or " - f"context.set_context(device_target='GPU').") - if flags & (self.ENVIRONMENT | self.SALIENCY): - if self._is_saliency_registered: - mode = context.get_context('mode') - if mode != context.PYNATIVE_MODE: - raise RuntimeError("Context mode: GRAPH_MODE is not supported, " - "please call context.set_context(mode=context.PYNATIVE_MODE).") - - if flags & self.REGISTRATION: - if self._is_uncertainty_registered and not self._is_saliency_registered: - raise ValueError("Function register_uncertainty() is called but register_saliency() is not.") - if not self._is_saliency_registered and not self._is_hoc_registered: - raise ValueError( - "No explanation module was registered, user should at least call register_saliency() " - "or register_hierarchical_occlusion() once with proper arguments.") - - if flags & (self.DATA_N_NETWORK | self.SALIENCY | self.HOC): - self._verify_data() - - if flags & self.DATA_N_NETWORK: - self._verify_network() - - if flags & self.SALIENCY: - self._verify_saliency() - - def _verify_labels(self): - """Verify labels.""" - label_set = set() - if not self._labels: - raise ValueError(f"The label list provided is empty.") - for i, label in enumerate(self._labels): - if label.strip() == "": - raise ValueError(f"Label [{i}] is all whitespaces or empty. Please make sure there is " - f"no empty label.") - if label in label_set: - raise ValueError(f"Duplicated label:{label}! Please make sure all labels are unique.") - label_set.add(label) - - def _verify_ds_inputs_shape(self, sample, inputs, labels): - """Verify a dataset sample's input shape.""" - - if len(inputs.shape) > 4 or len(inputs.shape) < 3 or inputs.shape[-3] not in [1, 3, 4]: - raise ValueError( - "Image shape {} is unrecognizable: the dimension of image can only be CHW or NCHW.".format( - inputs.shape)) - if len(inputs.shape) == 3: - log.warning( - "Image shape {} is 3-dimensional. 
All the data will be automatically unsqueezed at the 0-th" - " dimension as batch data.".format(inputs.shape)) - if len(sample) > 1: - if len(labels.shape) > 2 and (np.array(labels.shape[1:]) > 1).sum() > 1: - raise ValueError( - "Labels shape {} is unrecognizable: outputs should not have more than two dimensions" - " with length greater than 1.".format(labels.shape)) - - if self._is_hoc_registered: - if inputs.shape[-3] != 3: - raise ValueError( - "Hierarchical occlusion is registered, images must be in 3 channels format, but " - "{} channel(s) is(are) encountered.".format(inputs.shape[-3])) - short_side = min(inputs.shape[-2:]) - if short_side < hoc.AUTO_IMAGE_SHORT_SIDE_MIN: - raise ValueError( - "Hierarchical occlusion is registered, images' short side must be equals to or greater then " - "{}, but {} is encountered.".format(hoc.AUTO_IMAGE_SHORT_SIDE_MIN, short_side)) - - def _verify_ds_sample(self, sample): - """Verify a dataset sample.""" - if len(sample) not in [1, 2, 3]: - raise ValueError("The dataset should provide [images] or [images, labels], [images, labels, bboxes]" - " as columns.") - - if len(sample) == 3: - inputs, labels, bboxes = sample - if bboxes.shape[-1] != 4: - raise ValueError("The third element of dataset should be bounding boxes with shape of " - "[batch_size, num_ground_truth, 4].") - else: - if self._benchmarkers is not None: - if any([isinstance(bench, Localization) for bench in self._benchmarkers]): - raise ValueError("The dataset must provide bboxes if Localization is to be computed.") - - if len(sample) == 2: - inputs, labels = sample - if len(sample) == 1: - inputs = sample[0] - - self._verify_ds_inputs_shape(sample, inputs, labels) - - def _verify_data(self): - """Verify dataset and labels.""" - self._verify_labels() - - try: - sample = next(self._dataset.create_tuple_iterator()) - except StopIteration: - raise ValueError("The dataset provided is empty.") - - self._verify_ds_sample(sample) - - def _verify_network(self): - """Verify the network.""" - next_element = next(self._dataset.create_tuple_iterator()) - inputs, _, _ = self._unpack_next_element(next_element) - prop_test = self._full_network(inputs) - check_value_type("output of network in explainer", prop_test, ms.Tensor) - if prop_test.shape[1] != len(self._labels): - raise ValueError("The dimension of network output does not match the no. of classes. Please " - "check labels or the network in the explainer again.") - - def _verify_saliency(self): - """Verify the saliency settings.""" - if self._explainers: - explainer_classes = [] - for explainer in self._explainers: - if explainer.__class__ in explainer_classes: - raise ValueError(f"Repeated {explainer.__class__.__name__} explainer! " - "Please make sure all explainers' class is distinct.") - if explainer.network is not self._network: - raise ValueError(f"The network of {explainer.__class__.__name__} explainer is different " - "instance from network of runner. Please make sure they are the same " - "instance.") - explainer_classes.append(explainer.__class__) - if self._benchmarkers: - benchmarker_classes = [] - for benchmarker in self._benchmarkers: - if benchmarker.__class__ in benchmarker_classes: - raise ValueError(f"Repeated {benchmarker.__class__.__name__} benchmarker! " - "Please make sure all benchmarkers' class is distinct.") - if isinstance(benchmarker, LabelSensitiveMetric) and benchmarker.num_labels != len(self._labels): - raise ValueError(f"The num_labels of {benchmarker.__class__.__name__} benchmarker is different " - "from no. 
of labels of runner. Please make them are the same.") - benchmarker_classes.append(benchmarker.__class__) - - -@deprecated_error -class ImageClassificationRunner(_Verifier): - """ - A high-level API for users to generate and store results of the explanation methods and the evaluation methods. - - Update in 2020.11: Adjust the storage structure and format of the data. Summary files generated by previous version - will be deprecated and will not be supported in MindInsight of current version. - - Args: - summary_dir (str): The directory path to save the summary files which store the generated results. - data (tuple[Dataset, list[str]]): Tuple of dataset and the corresponding class label list. The dataset - should provides [images], [images, labels] or [images, labels, bboxes] as columns. The label list must - share the exact same length and order of the network outputs. - network (Cell): The network(with logit outputs) to be explained. - activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For - single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification - tasks, `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long - as when combining this function with network, the final output is the probability of the input. - - Raises: - TypeError: Be raised for any argument type problem. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> from mindspore.explainer import ImageClassificationRunner - >>> from mindspore.explainer.explanation import GuidedBackprop, Gradient - >>> from mindspore.explainer.benchmark import Faithfulness - >>> from mindspore.nn import Softmax - >>> from mindspore.train.serialization import load_checkpoint, load_param_into_net - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of AlexNet is shown in model_zoo.official.cv.alexnet.src.alexnet.py - >>> net = AlexNet(10) - >>> # Load the checkpoint - >>> param_dict = load_checkpoint("/path/to/checkpoint") - >>> load_param_into_net(net, param_dict) - [] - >>> - >>> # Prepare the dataset for explaining and evaluation. - >>> # The detail of create_dataset_cifar10 method is shown in model_zoo.official.cv.alexnet.src.dataset.py - >>> - >>> dataset = create_dataset_cifar10("/path/to/cifar/dataset", 1) - >>> labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] - >>> - >>> activation_fn = Softmax() - >>> gbp = GuidedBackprop(net) - >>> gradient = Gradient(net) - >>> explainers = [gbp, gradient] - >>> faithfulness = Faithfulness(len(labels), activation_fn, "NaiveFaithfulness") - >>> benchmarkers = [faithfulness] - >>> - >>> runner = ImageClassificationRunner("./summary_dir", (dataset, labels), net, activation_fn) - >>> runner.register_saliency(explainers=explainers, benchmarkers=benchmarkers) - >>> runner.run() - """ - - # datafile directory names - _DATAFILE_DIRNAME_PREFIX = "_explain_" - _ORIGINAL_IMAGE_DIRNAME = "origin_images" - _HEATMAP_DIRNAME = "heatmap" - # specfial filenames - _MANIFEST_FILENAME = "manifest.json" - # max. no. 
of sample per directory - _SAMPLE_PER_DIR = 1000 - # seed for fixing the iterating order of the dataset - _DATASET_SEED = 58 - # printing spacer - _SPACER = "{:120}\r" - # datafile directory's permission - _DIR_MODE = 0o700 - # datafile's permission - _FILE_MODE = 0o400 - - def __init__(self, - summary_dir, - data, - network, - activation_fn): - - check_value_type("data", data, tuple) - if len(data) != 2: - raise ValueError("Argument data is not a tuple with 2 elements") - check_value_type("data[0]", data[0], Dataset) - check_value_type("data[1]", data[1], list) - if not all(isinstance(ele, str) for ele in data[1]): - raise ValueError("Argument data[1] is not list of str.") - - check_value_type("summary_dir", summary_dir, str) - check_value_type("network", network, Cell) - check_value_type("activation_fn", activation_fn, Cell) - - self._summary_dir = summary_dir - self._dataset = data[0] - self._labels = data[1] - self._network = network - self._explainers = None - self._benchmarkers = None - self._uncertainty = None - self._hoc_searcher = None - self._summary_timestamp = None - self._sample_index = -1 - self._manifest = None - - self._full_network = SequentialCell([self._network, activation_fn]) - self._full_network.set_train(False) - - self._verify(_Verifier.DATA_N_NETWORK | _Verifier.ENVIRONMENT) - - def register_saliency(self, - explainers, - benchmarkers=None): - """ - Register saliency explanation instances. - - .. warning:: - This function can not be invoked more than once on each runner. - - Args: - explainers (list[Attribution]): The explainers to be evaluated, - see `mindspore.explainer.explanation`. All explainers' class must be distinct and their network - must be the exact same instance of the runner's network. - benchmarkers (list[AttributionMetric], optional): The benchmarkers for scoring the explainers, - see `mindspore.explainer.benchmark`. All benchmarkers' class must be distinct. - - Raises: - ValueError: Be raised for any data or settings' value problem. - TypeError: Be raised for any data or settings' type problem. - RuntimeError: Be raised if this function was invoked before. - """ - check_value_type("explainers", explainers, list) - if not all(isinstance(ele, Attribution) for ele in explainers): - raise TypeError("Argument explainers is not list of mindspore.explainer.explanation .") - - if not explainers: - raise ValueError("Argument explainers is empty.") - - if benchmarkers is not None: - check_value_type("benchmarkers", benchmarkers, list) - if not all(isinstance(ele, AttributionMetric) for ele in benchmarkers): - raise TypeError("Argument benchmarkers is not list of mindspore.explainer.benchmark .") - - if self._explainers is not None: - raise RuntimeError("Function register_saliency() was invoked already.") - - self._explainers = explainers - self._benchmarkers = benchmarkers - - try: - self._verify(_Verifier.SALIENCY | _Verifier.ENVIRONMENT) - except (ValueError, TypeError): - self._explainers = None - self._benchmarkers = None - raise - - def register_hierarchical_occlusion(self): - """ - Register hierarchical occlusion instances. - - .. warning:: - This function can not be invoked more than once on each runner. - - Note: - Input images are required to be in 3 channels formats and the length of side short must be equals to or - greater than 56 pixels. - - Raises: - ValueError: Be raised for any data or settings' value problem. - RuntimeError: Be raised if the function was called already. 
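For context, the full registration flow being retired by this diff looked roughly as follows; `net`, `dataset`, and `labels` are stand-ins for a trained network, a prepared dataset, and its class-name list, mirroring the AlexNet example in the class docstring above:

from mindspore import context
from mindspore.nn import Softmax
from mindspore.explainer import ImageClassificationRunner
from mindspore.explainer.explanation import Gradient

context.set_context(mode=context.PYNATIVE_MODE)
runner = ImageClassificationRunner("./summary_dir", (dataset, labels), net, Softmax())
runner.register_saliency(explainers=[Gradient(net)])
runner.register_hierarchical_occlusion()  # needs 3-channel images, short side >= 56 pixels
runner.run()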
- """ - if self._hoc_searcher is not None: - raise RuntimeError("Function register_hierarchical_occlusion() was invoked already.") - - self._hoc_searcher = hoc.Searcher(self._full_network) - - try: - self._verify(_Verifier.HOC | _Verifier.ENVIRONMENT) - except ValueError: - self._hoc_searcher = None - raise - - def register_uncertainty(self): - """ - Register uncertainty instance to compute the epistemic uncertainty base on the Bayes' theorem. - - .. warning:: - This function can not be invoked more than once on each runner. - - Note: - Please refer to the documentation of mindspore.nn.probability.toolbox.uncertainty_evaluation for the - details. The actual output is standard deviation of the classification predictions and the corresponding - 95% confidence intervals. Users have to invoke register_saliency() as well for the uncertainty results are - going to be shown on the saliency map page in MindInsight. - - Raises: - RuntimeError: Be raised if the function was called already. - """ - if self._uncertainty is not None: - raise RuntimeError("Function register_uncertainty() was invoked already.") - - self._uncertainty = UncertaintyEvaluation(model=self._full_network, - train_dataset=None, - task_type='classification', - num_classes=len(self._labels)) - - def run(self): - """ - Run the explain job and save the result as a summary in summary_dir. - - Note: - User should call register_saliency() once before running this function. - - Raises: - ValueError: Be raised for any data or settings' value problem. - TypeError: Be raised for any data or settings' type problem. - RuntimeError: Be raised for any runtime problem. - """ - self._verify(_Verifier.ALL) - self._manifest = {"saliency_map": False, - "benchmark": False, - "uncertainty": False, - "hierarchical_occlusion": False} - with SummaryRecord(self._summary_dir, raise_exception=True) as summary: - print("Start running and writing......") - begin = time() - - self._summary_timestamp = self._extract_timestamp(summary.file_info['file_name']) - if self._summary_timestamp is None: - raise RuntimeError("Cannot extract timestamp from summary filename!" - " It should contains a timestamp after 'summary.' .") - - self._save_metadata(summary) - - imageid_labels = self._run_inference(summary) - sample_count = self._sample_index - if self._is_saliency_registered: - self._run_saliency(summary, imageid_labels) - if not self._manifest["saliency_map"]: - raise RuntimeError( - f"No saliency map was generated in {sample_count} samples. " - f"Please make sure the dataset, labels, activation function and network are properly trained " - f"and configured.") - - if self._is_hoc_registered and not self._manifest["hierarchical_occlusion"]: - raise RuntimeError( - f"No Hierarchical Occlusion result was found in {sample_count} samples. " - f"Please make sure the dataset, labels, activation function and network are properly trained " - f"and configured.") - - self._save_manifest() - - print("Finish running and writing. 
Total time elapsed: {:.3f} s".format(time() - begin)) - - @property - def _is_hoc_registered(self): - """Check if HOC module is registered.""" - return self._hoc_searcher is not None - - @property - def _is_saliency_registered(self): - """Check if saliency module is registered.""" - return bool(self._explainers) - - @property - def _is_uncertainty_registered(self): - """Check if uncertainty module is registered.""" - return self._uncertainty is not None - - def _save_metadata(self, summary): - """Save metadata of the explain job to summary.""" - print("Start writing metadata......") - - explain = Explain() - explain.metadata.label.extend(self._labels) - - if self._is_saliency_registered: - exp_names = [exp.__class__.__name__ for exp in self._explainers] - explain.metadata.explain_method.extend(exp_names) - if self._benchmarkers is not None: - bench_names = [bench.__class__.__name__ for bench in self._benchmarkers] - explain.metadata.benchmark_method.extend(bench_names) - - summary.add_value("explainer", "metadata", explain) - summary.record(1) - - print("Finish writing metadata.") - - def _run_inference(self, summary, threshold=0.5): - """ - Run inference for the dataset and write the inference related data into summary. - - Args: - summary (SummaryRecord): The summary object to store the data. - threshold (float): The threshold for prediction. - - Returns: - dict, The map of sample d to the union of its ground truth and predicted labels. - """ - sample_id_labels = {} - self._sample_index = 0 - ds.config.set_seed(self._DATASET_SEED) - for j, batch in enumerate(self._dataset): - now = time() - self._infer_batch(summary, batch, sample_id_labels, threshold) - self._spaced_print("Finish running and writing {}-th batch inference data." - " Time elapsed: {:.3f} s".format(j, time() - now)) - return sample_id_labels - - def _infer_batch(self, summary, batch, sample_id_labels, threshold): - """ - Infer a batch. - - Args: - summary (SummaryRecord): The summary object to store the data. - batch (tuple): The next dataset sample. - sample_id_labels (dict): The sample id to labels dictionary. - threshold (float): The threshold for prediction. 
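The label selection that this threshold controls (implemented a few lines below) reduces to the following numpy sketch; the probabilities are made up for illustration:

import numpy as np

prob = np.array([0.92, 0.07, 0.61])  # class probabilities of one sample
threshold = 0.5
predicted_labels = [int(i) for i in (prob > threshold).nonzero()[0]]
predicted_probs = [float(prob[i]) for i in predicted_labels]
# predicted_labels == [0, 2], predicted_probs == [0.92, 0.61]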
- """ - inputs, labels, _ = self._unpack_next_element(batch) - prob = self._full_network(inputs).asnumpy() - - if self._uncertainty is not None: - prob_var = self._uncertainty.eval_epistemic_uncertainty(inputs) - else: - prob_var = None - - for idx, inp in enumerate(inputs): - gt_labels = labels[idx] - gt_probs = [float(prob[idx][i]) for i in gt_labels] - - if prob_var is not None: - gt_prob_vars = [float(prob_var[idx][i]) for i in gt_labels] - gt_itl_lows, gt_itl_his, gt_prob_sds = \ - self._calc_beta_intervals(gt_probs, gt_prob_vars) - - data_np = _convert_image_format(np.expand_dims(inp.asnumpy(), 0), 'NCHW') - original_image = _np_to_image(_normalize(data_np), mode='RGB') - original_image_path = self._save_original_image(self._sample_index, original_image) - - predicted_labels = [int(i) for i in (prob[idx] > threshold).nonzero()[0]] - predicted_probs = [float(prob[idx][i]) for i in predicted_labels] - - if prob_var is not None: - predicted_prob_vars = [float(prob_var[idx][i]) for i in predicted_labels] - predicted_itl_lows, predicted_itl_his, predicted_prob_sds = \ - self._calc_beta_intervals(predicted_probs, predicted_prob_vars) - - union_labs = list(set(gt_labels + predicted_labels)) - sample_id_labels[str(self._sample_index)] = union_labs - - explain = Explain() - explain.sample_id = self._sample_index - explain.image_path = original_image_path - summary.add_value("explainer", "sample", explain) - - explain = Explain() - explain.sample_id = self._sample_index - explain.ground_truth_label.extend(gt_labels) - explain.inference.ground_truth_prob.extend(gt_probs) - explain.inference.predicted_label.extend(predicted_labels) - explain.inference.predicted_prob.extend(predicted_probs) - - if prob_var is not None: - explain.inference.ground_truth_prob_sd.extend(gt_prob_sds) - explain.inference.ground_truth_prob_itl95_low.extend(gt_itl_lows) - explain.inference.ground_truth_prob_itl95_hi.extend(gt_itl_his) - explain.inference.predicted_prob_sd.extend(predicted_prob_sds) - explain.inference.predicted_prob_itl95_low.extend(predicted_itl_lows) - explain.inference.predicted_prob_itl95_hi.extend(predicted_itl_his) - - self._manifest["uncertainty"] = True - - summary.add_value("explainer", "inference", explain) - summary.record(1) - - if self._is_hoc_registered: - self._run_hoc(summary, self._sample_index, inputs[idx], prob[idx]) - - self._sample_index += 1 - - def _run_explainer(self, summary, sample_id_labels, explainer): - """ - Run the explainer. - - Args: - summary (SummaryRecord): The summary object to store the data. - sample_id_labels (dict): A dict that maps the sample id and its union labels. - explainer (_Attribution): An Attribution object to generate saliency maps. - """ - for idx, next_element in enumerate(self._dataset): - now = time() - self._spaced_print("Start running {}-th explanation data for {}......".format( - idx, explainer.__class__.__name__)) - saliency_dict_lst = self._run_exp_step(next_element, explainer, sample_id_labels, summary) - self._spaced_print( - "Finish writing {}-th batch explanation data for {}. Time elapsed: {:.3f} s".format( - idx, explainer.__class__.__name__, time() - now)) - - if not self._benchmarkers: - continue - - for bench in self._benchmarkers: - now = time() - self._spaced_print( - "Start running {}-th batch {} data for {}......".format( - idx, bench.__class__.__name__, explainer.__class__.__name__)) - self._run_exp_benchmark_step(next_element, explainer, bench, saliency_dict_lst) - self._spaced_print( - "Finish running {}-th batch {} data for {}. 
Time elapsed: {:.3f} s".format( - idx, bench.__class__.__name__, explainer.__class__.__name__, time() - now)) - - def _run_saliency(self, summary, sample_id_labels): - """Run the saliency explanations.""" - - for explainer in self._explainers: - explain = Explain() - if self._benchmarkers: - for bench in self._benchmarkers: - bench.reset() - print(f"Start running and writing explanation for {explainer.__class__.__name__}......") - self._sample_index = 0 - start = time() - ds.config.set_seed(self._DATASET_SEED) - self._run_explainer(summary, sample_id_labels, explainer) - - if not self._benchmarkers: - continue - - for bench in self._benchmarkers: - benchmark = explain.benchmark.add() - benchmark.explain_method = explainer.__class__.__name__ - benchmark.benchmark_method = bench.__class__.__name__ - - benchmark.total_score = bench.performance - if isinstance(bench, LabelSensitiveMetric): - benchmark.label_score.extend(bench.class_performances) - - self._spaced_print("Finish running and writing explanation and benchmark data for {}. " - "Time elapsed: {:.3f} s".format(explainer.__class__.__name__, time() - start)) - summary.add_value('explainer', 'benchmark', explain) - summary.record(1) - - def _run_hoc(self, summary, sample_id, sample_input, prob): - """ - Run HOC search for a sample image, and then save the result to summary. - - Args: - summary (SummaryRecord): The summary object to store the data. - sample_id (int): The sample ID. - sample_input (Union[Tensor, np.ndarray]): Sample image tensor in CHW or NCWH(N=1). - prob (Union[Tensor, np.ndarray]): List of sample's classification prediction output, HOC will run for - labels with prediction output strictly larger then HOC searcher's threshold(0.5 by default). - """ - if isinstance(sample_input, ms.Tensor): - sample_input = sample_input.asnumpy() - if len(sample_input.shape) == 3: - sample_input = np.expand_dims(sample_input, axis=0) - - explain = None - str_mask = hoc.auto_str_mask(sample_input) - compiled_mask = None - - for label_idx, label_prob in enumerate(prob): - if label_prob <= self._hoc_searcher.threshold: - continue - if compiled_mask is None: - compiled_mask = hoc.compile_mask(str_mask, sample_input) - try: - edit_tree, layer_outputs = self._hoc_searcher.search(sample_input, label_idx, compiled_mask) - except hoc.NoValidResultError: - log.warning(f"No Hierarchical Occlusion result was found in sample#{sample_id} " - f"label:{self._labels[label_idx]}, skipped.") - continue - - if explain is None: - explain = Explain() - explain.sample_id = sample_id - - self._add_hoc_result_to_explain(label_idx, str_mask, edit_tree, layer_outputs, explain) - - if explain is not None: - summary.add_value("explainer", "hoc", explain) - summary.record(1) - self._manifest['hierarchical_occlusion'] = True - - @staticmethod - def _add_hoc_result_to_explain(label_idx, str_mask, edit_tree, layer_outputs, explain): - """ - Add HOC result to Explain record. - - Args: - label_idx (int): The label index. - str_mask (str): The mask string. - edit_tree (EditStep): The result HOC edit tree. - layer_outputs (list[float]): The network output confident of each layer. - explain (Explain): The Explain record. 
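Rendered as plain Python data, the nested record this helper assembles has roughly the following shape; the label index, mask string, probabilities, and box coordinates are hypothetical values for illustration only:

hoc_record = {
    "label": 2,               # index into the runner's label list
    "mask": "gaussian:9",     # hypothetical mask descriptor string
    "layers": [               # one entry per layer of the edit tree
        {"prob": 0.63, "box": [16, 16, 32, 32]},
        {"prob": 0.81, "box": [16, 16, 16, 16, 48, 8, 16, 16]},  # two steps, flattened
    ],
}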
- """ - hoc_rec = explain.hoc.add() - hoc_rec.label = label_idx - hoc_rec.mask = str_mask - layer_count = edit_tree.max_layer + 1 - for layer in range(layer_count): - steps = edit_tree.get_layer_or_leaf_steps(layer) - layer_output = layer_outputs[layer] - hoc_layer = hoc_rec.layer.add() - hoc_layer.prob = layer_output - for step in steps: - hoc_layer.box.extend(list(step.box)) - - def _add_exp_step_samples(self, explainer, sample_label_sets, batch_saliency_full, summary): - """ - Add explanation results of samples to summary record. - - Args: - explainer (Attribution): The explainer to be run. - sample_label_sets (list[list[int]]): The label sets of samples. - batch_saliency_full (Tensor): The saliency output from explainer. - summary (SummaryRecord): The summary record. - """ - saliency_dict_lst = [] - has_saliency_rec = False - for idx, label_set in enumerate(sample_label_sets): - saliency_dict = {} - explain = Explain() - explain.sample_id = self._sample_index - for k, lab in enumerate(label_set): - saliency = batch_saliency_full[idx:idx + 1, k:k + 1] - saliency_dict[lab] = saliency - - saliency_np = _normalize(saliency.asnumpy().squeeze()) - saliency_image = _np_to_image(saliency_np, mode='L') - heatmap_path = self._save_heatmap(explainer.__class__.__name__, lab, - self._sample_index, saliency_image) - - explanation = explain.explanation.add() - explanation.explain_method = explainer.__class__.__name__ - explanation.heatmap_path = heatmap_path - explanation.label = lab - - has_saliency_rec = True - - summary.add_value("explainer", "explanation", explain) - summary.record(1) - - self._sample_index += 1 - saliency_dict_lst.append(saliency_dict) - - return saliency_dict_lst, has_saliency_rec - - def _run_exp_step(self, next_element, explainer, sample_id_labels, summary): - """ - Run the explanation for each step and write explanation results into summary. - - Args: - next_element (Tuple): Data of one step - explainer (_Attribution): An Attribution object to generate saliency maps. - sample_id_labels (dict): A dict that maps the sample id and its union labels. - summary (SummaryRecord): The summary object to store the data. - - Returns: - list, List of dict that maps label to its corresponding saliency map. 
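In numpy terms, that return value comes from slicing one (batch, label slot, H, W) saliency array per sample and per label, as in this sketch with made-up shapes:

import numpy as np

batch_saliency_full = np.random.rand(2, 2, 32, 32)   # (batch, label slot, H, W)
sample_label_sets = [[1, 3], [2]]                    # union labels per sample
saliency_dict_lst = []
for idx, label_set in enumerate(sample_label_sets):
    saliency_dict = {lab: batch_saliency_full[idx:idx + 1, k:k + 1]
                     for k, lab in enumerate(label_set)}
    saliency_dict_lst.append(saliency_dict)
# saliency_dict_lst[0][3].shape == (1, 1, 32, 32)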
- """ - inputs, labels, _ = self._unpack_next_element(next_element) - sample_index = self._sample_index - sample_label_sets = [] - for _ in range(len(labels)): - sample_label_sets.append(sample_id_labels[str(sample_index)]) - sample_index += 1 - - batch_label_sets = self._make_label_batch(sample_label_sets) - - if isinstance(explainer, RISE): - batch_saliency_full = explainer(inputs, batch_label_sets) - else: - batch_saliency_full = [] - for i in range(len(batch_label_sets[0])): - batch_saliency = explainer(inputs, batch_label_sets[:, i]) - batch_saliency_full.append(batch_saliency) - concat = ms.ops.operations.Concat(1) - batch_saliency_full = concat(tuple(batch_saliency_full)) - - saliency_dict_lst, has_saliency_rec = \ - self._add_exp_step_samples(explainer, sample_label_sets, batch_saliency_full, summary) - - if has_saliency_rec: - self._manifest['saliency_map'] = True - - return saliency_dict_lst - - def _run_exp_benchmark_step(self, next_element, explainer, benchmarker, saliency_dict_lst): - """Run the explanation and evaluation for each step and write explanation results into summary.""" - inputs, labels, _ = self._unpack_next_element(next_element) - for idx, inp in enumerate(inputs): - inp = _EXPAND_DIMS(inp, 0) - self._manifest['benchmark'] = True - if isinstance(benchmarker, LabelAgnosticMetric): - res = benchmarker.evaluate(explainer, inp) - benchmarker.aggregate(res) - continue - saliency_dict = saliency_dict_lst[idx] - for label, saliency in saliency_dict.items(): - if isinstance(benchmarker, Localization): - _, _, bboxes = self._unpack_next_element(next_element, True) - if label in labels[idx]: - res = benchmarker.evaluate(explainer, inp, targets=label, mask=bboxes[idx][label], - saliency=saliency) - benchmarker.aggregate(res, label) - elif isinstance(benchmarker, LabelSensitiveMetric): - res = benchmarker.evaluate(explainer, inp, targets=label, saliency=saliency) - benchmarker.aggregate(res, label) - else: - raise TypeError('Benchmarker must be one of LabelSensitiveMetric or LabelAgnosticMetric, but' - 'receive {}'.format(type(benchmarker))) - - @staticmethod - def _calc_beta_intervals(means, variances, prob=0.95): - """Calculate confidence interval of beta distributions.""" - if not isinstance(means, np.ndarray): - means = np.array(means) - if not isinstance(variances, np.ndarray): - variances = np.array(variances) - with np.errstate(divide='ignore'): - coef_a = ((means ** 2) * (1 - means) / variances) - means - coef_b = (coef_a * (1 - means)) / means - itl_lows, itl_his = beta.interval(prob, coef_a, coef_b) - sds = np.sqrt(variances) - for i in range(itl_lows.shape[0]): - if not np.isfinite(sds[i]) or not np.isfinite(itl_lows[i]) or not np.isfinite(itl_his[i]): - itl_lows[i] = means[i] - itl_his[i] = means[i] - sds[i] = 0 - return itl_lows, itl_his, sds - - def _transform_bboxes(self, inputs, labels, bboxes, ifbbox): - """ - Transform the bounding boxes. - Args: - inputs (Tensor): the image data - labels (Tensor): the labels - bboxes (Tensor): the boudnding boxes data - ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t - label id will be returned. If False, the returned bboxes is the the parsed bboxes. 
- - Returns: - bboxes (Union[list[dict], None, Tensor]): the bounding boxes - """ - input_len = len(inputs) - if bboxes is None or not ifbbox: - return bboxes - bboxes = ms.Tensor(bboxes, ms.int32) - masks_lst = [] - labels = labels.asnumpy().reshape([input_len, -1]) - bboxes = bboxes.asnumpy().reshape([input_len, -1, 4]) - for idx, label in enumerate(labels): - height, width = inputs[idx].shape[-2], inputs[idx].shape[-1] - masks = {} - for j, label_item in enumerate(label): - target = int(label_item) - if not -1 < target < len(self._labels): - continue - if target not in masks: - mask = np.zeros((1, 1, height, width)) - else: - mask = masks[target] - x_min, y_min, x_len, y_len = bboxes[idx][j].astype(int) - mask[:, :, x_min:x_min + x_len, y_min:y_min + y_len] = 1 - masks[target] = mask - masks_lst.append(masks) - bboxes = masks_lst - return bboxes - - def _transform_data(self, inputs, labels, bboxes, ifbbox): - """ - Transform the data from one iteration of dataset to a unifying form for the follow-up operations. - - Args: - inputs (Tensor): the image data - labels (Tensor): the labels - bboxes (Tensor): the boudnding boxes data - ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t - label id will be returned. If False, the returned bboxes is the the parsed bboxes. - - Returns: - inputs (Tensor): the image data, unified to a 4D Tensor. - labels (list[list[int]]): the ground truth labels. - bboxes (Union[list[dict], None, Tensor]): the bounding boxes - """ - inputs = ms.Tensor(inputs, ms.float32) - if len(inputs.shape) == 3: - inputs = _EXPAND_DIMS(inputs, 0) - if isinstance(labels, ms.Tensor): - labels = ms.Tensor(labels, ms.int32) - labels = _EXPAND_DIMS(labels, 0) - if isinstance(bboxes, ms.Tensor): - bboxes = ms.Tensor(bboxes, ms.int32) - bboxes = _EXPAND_DIMS(bboxes, 0) - - bboxes = self._transform_bboxes(inputs, labels, bboxes, ifbbox) - - labels = ms.Tensor(labels, ms.int32) - if len(labels.shape) == 1: - labels_lst = [[int(i)] for i in labels.asnumpy()] - else: - labels = labels.asnumpy().reshape([len(inputs), -1]) - labels_lst = [] - for item in labels: - labels_lst.append(list(set(int(i) for i in item if -1 < int(i) < len(self._labels)))) - labels = labels_lst - return inputs, labels, bboxes - - def _unpack_next_element(self, next_element, ifbbox=False): - """ - Unpack a single iteration of dataset. - - Args: - next_element (Tuple): a single element iterated from dataset object. - ifbbox (bool): whether to preprocess bboxes in self._transform_data. - - Returns: - tuple, a unified Tuple contains image_data, labels, and bounding boxes. - """ - if len(next_element) == 3: - inputs, labels, bboxes = next_element - elif len(next_element) == 2: - inputs, labels = next_element - bboxes = None - else: - inputs = next_element[0] - labels = [[] for _ in inputs] - bboxes = None - inputs, labels, bboxes = self._transform_data(inputs, labels, bboxes, ifbbox) - return inputs, labels, bboxes - - @staticmethod - def _make_label_batch(labels): - """ - Unify a List of List of labels to be a 2D Tensor with shape (b, m), where b = len(labels) and m is the max - length of all the rows in labels. - - Args: - labels (List[List]): the union labels of a data batch. - - Returns: - 2D Tensor. 
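With MindSpore tensors swapped for numpy arrays, the padding scheme described here behaves like so:

import numpy as np

labels = [[1, 3], [2], [0, 4, 7]]
max_len = max(len(label) for label in labels)
batch_labels = np.zeros((len(labels), max_len), dtype=np.int32)
for idx, label in enumerate(labels):
    batch_labels[idx, :len(label)] = label
# batch_labels:
# [[1 3 0]
#  [2 0 0]
#  [0 4 7]]   rows are zero-padded up to the longest label set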
- """ - max_len = max([len(label) for label in labels]) - batch_labels = np.zeros((len(labels), max_len)) - - for idx, _ in enumerate(batch_labels): - length = len(labels[idx]) - batch_labels[idx, :length] = np.array(labels[idx]) - - return ms.Tensor(batch_labels, ms.int32) - - def _save_manifest(self): - """Save manifest.json underneath datafile directory.""" - if self._manifest is None: - raise RuntimeError("Manifest not yet be initialized.") - path_tokens = [self._summary_dir, - self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp)] - abs_dir_path = self._create_subdir(*path_tokens) - save_path = os.path.join(abs_dir_path, self._MANIFEST_FILENAME) - fd = os.open(save_path, os.O_WRONLY | os.O_CREAT, mode=self._FILE_MODE) - file = os.fdopen(fd, "w") - try: - json.dump(self._manifest, file, indent=4) - except IOError: - log.error(f"Failed to save manifest as {save_path}!") - raise - finally: - file.flush() - os.close(fd) - os.chmod(save_path, self._FILE_MODE) - - def _save_original_image(self, sample_id, image): - """Save an image to summary directory.""" - id_dirname = self._get_sample_dirname(sample_id) - path_tokens = [self._summary_dir, - self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp), - self._ORIGINAL_IMAGE_DIRNAME, - id_dirname] - - abs_dir_path = self._create_subdir(*path_tokens) - filename = f"{sample_id}.jpg" - save_path = os.path.join(abs_dir_path, filename) - image.save(save_path) - os.chmod(save_path, self._FILE_MODE) - return os.path.join(*path_tokens[1:], filename) - - def _save_heatmap(self, explain_method, class_id, sample_id, image): - """Save heatmap image to summary directory.""" - id_dirname = self._get_sample_dirname(sample_id) - path_tokens = [self._summary_dir, - self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp), - self._HEATMAP_DIRNAME, - explain_method, - id_dirname] - - abs_dir_path = self._create_subdir(*path_tokens) - filename = f"{sample_id}_{class_id}.jpg" - save_path = os.path.join(abs_dir_path, filename) - image.save(save_path, optimize=True) - os.chmod(save_path, self._FILE_MODE) - return os.path.join(*path_tokens[1:], filename) - - def _create_subdir(self, *args): - """Recursively create subdirectories.""" - abs_path = None - for token in args: - if abs_path is None: - abs_path = os.path.realpath(token) - else: - abs_path = os.path.join(abs_path, token) - # os.makedirs() don't set intermediate dir permission properly, we mkdir() one by one - try: - os.mkdir(abs_path, mode=self._DIR_MODE) - # In some platform, mode may be ignored in os.mkdir(), we have to chmod() again to make sure - os.chmod(abs_path, mode=self._DIR_MODE) - except FileExistsError: - pass - return abs_path - - @classmethod - def _get_sample_dirname(cls, sample_id): - """Get the name of parent directory of the image id.""" - return str(int(sample_id / cls._SAMPLE_PER_DIR) * cls._SAMPLE_PER_DIR) - - @staticmethod - def _extract_timestamp(filename): - """Extract timestamp from summary filename.""" - matched = re.search(r"summary\.(\d+)", filename) - if matched: - return int(matched.group(1)) - return None - - @classmethod - def _spaced_print(cls, message): - """Spaced message printing.""" - # workaround to print logs starting new line in case line width mismatch. 
- print(cls._SPACER.format(message)) diff --git a/mindspore/explainer/_operators.py b/mindspore/explainer/_operators.py deleted file mode 100644 index 25a02380262..00000000000 --- a/mindspore/explainer/_operators.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Packaged operations based on MindSpore.""" - -__all__ = [ - 'absolute', - 'arange', - 'argmax', - 'argmin', - 'argsort', - 'assign', - 'intersection', - 'matmul', - 'maximum', - 'minimum', - 'mean', - 'mul', - 'sort', - 'sqrt', - 'squeeze', - 'tile', - 'reshape', - 'zeros', - 'zeros_like', - 'softmax', - 'Tensor', - 'summation' -] - -from typing import List, Tuple, Union, Callable - -import numpy as np - -import mindspore -from mindspore import nn -import mindspore.ops.operations as op - -_Axis = Union[int, Tuple[int, ...], List[int]] -_Idx = Union[int, mindspore.Tensor, Tuple[int, ...], Tuple[mindspore.Tensor, ...]] -_Number = Union[int, float, np.int, np.float] -_Shape = Union[int, Tuple[int, ...]] -Tensor = mindspore.Tensor - - -def absolute(inputs: Tensor) -> Tensor: - """Get the absolute value of a tensor value.""" - abs_op = op.Abs() - outputs = abs_op(inputs) - return outputs - - -def arange( - start: _Number, - end: _Number, - step: _Number = 1, - dtype: mindspore.dtype = None) -> Tensor: - """Get the arange value of tensor.""" - nums = np.arange(start=start, stop=end, step=step, dtype=np.int32) - nums = mindspore.Tensor(nums, dtype=dtype) - return nums - - -def argmax(inputs: Tensor, axis: int = -1, keep_dims: bool = False) -> Tensor: - """Returns the indices of the maximum values along an axis.""" - inputs_np = inputs.asnumpy() - outputs = np.argmax(inputs_np, axis=axis) - - if keep_dims: - outputs = np.expand_dims(outputs, axis=axis) - - return mindspore.Tensor(outputs, mindspore.int32) - - -def argmin(inputs: Tensor, axis: int = -1, keep_dims: bool = False) -> Tensor: - """Returns the indices of the minimum values along an axis.""" - inputs_np = inputs.asnumpy() - outputs = np.argmin(inputs_np, axis=axis) - - if keep_dims: - outputs = np.expand_dims(outputs, axis=axis) - - return mindspore.Tensor(outputs, mindspore.int32) - - -def argsort(inputs: Tensor, axis: int = -1, descending: bool = False) -> Tensor: - """Returns the indices that would sort an array.""" - inputs_np = inputs.asnumpy() - factor = -1 if descending else 1 - indices_np = np.argsort(factor * inputs_np, axis=axis) - indices = mindspore.Tensor(indices_np, dtype=mindspore.int32) - return indices - - -def assign(inputs: Tensor, idx: _Idx, value: Tensor) -> Tensor: - """Assign a tensor value to the given tensor and index.""" - inputs_np = inputs.asnumpy() - if isinstance(idx, Tensor): - idx = idx.asnumpy() - value_np = value.asnumpy() - inputs_np[idx] = value_np - outputs = mindspore.Tensor(inputs_np) - return outputs - - -def intersection(*inputs: Tensor) -> Tensor: - """Get the intersection 
value by the given tensor list.""" - outputs_np = np.ones_like(inputs[0]) - for inp in inputs: - outputs_np &= inp.asnumpy() - outputs = mindspore.Tensor(outputs_np) - return outputs - - -def matmul(inputs_x: Tensor, inputs_y: Tensor) -> Tensor: - """Multiplies matrix `inputs_x` and matrix `inputs_y`.""" - matmul_op = op.MatMul() - outputs = matmul_op(inputs_x, inputs_y) - return outputs - - -def maximum(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor: - """Reduces a dimension of a tensor by the maximum value in this dimension.""" - max_op = op.ReduceMax(keep_dims) - outputs = max_op(inputs, axis) - return outputs - - -def minimum(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor: - """Reduces a dimension of a tensor by the minimum value in the dimension.""" - max_op = op.ReduceMin(keep_dims) - outputs = max_op(inputs, axis) - return outputs - - -def mean(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor: - """Reduces a dimension of a tensor by averaging all elements in the dimension.""" - mean_op = op.ReduceMean(keep_dims) - outputs = mean_op(inputs, axis) - return outputs - - -def mul(inputs_x: Tensor, inputs_y: Tensor) -> Tensor: - """ - Multiplies two tensors element-wise. - - Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent. - The inputs must be two tensors or one tensor and one scalar. - When the inputs are two tensors, - dtypes of them cannot be both bool, and the shapes of them could be broadcast. - When the inputs are one tensor and one scalar, - the scalar could only be a constant. - - Inputs: - - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or - a bool or a tensor whose data type is number or bool. - - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or - a bool when the first input is a tensor or a tensor whose data type is number or bool. - - Outputs: - Tensor, the shape is the same as the one after broadcasting, - and the data type is the one with higher precision or higher digits among the two inputs. 
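A minimal demonstration of the broadcasting behaviour documented above, using the same `op` alias as this module (the wrapper itself just delegates to `op.Mul()`):

import numpy as np
import mindspore as ms
import mindspore.ops.operations as op

x = ms.Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), ms.float32)
y = ms.Tensor(np.array([10.0, 100.0]), ms.float32)  # broadcast across rows
print(op.Mul()(x, y))
# [[ 10. 200.]
#  [ 30. 400.]]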
- """ - mul_op = op.Mul() - outputs = mul_op(inputs_x, inputs_y) - return outputs - - -def sort(inputs: Tensor, axis: _Axis = -1, descending: bool = False) -> Tensor: - """Return a sorted copy of an array.""" - inputs_np = inputs.asnumpy() - outputs_np = np.sort(inputs_np, axis=axis) - if descending: - outputs_np = np.flip(outputs_np, axis=axis) - outputs = mindspore.Tensor(outputs_np) - return outputs - - -def squeeze(inputs: Tensor, axis: _Axis = ()): - """Returns a tensor with the same type but dimensions of 1 are removed based on `axis`.""" - squeeze_op = op.Squeeze(axis) - outputs = squeeze_op(inputs) - return outputs - - -def tile(inputs: Tensor, shape: Tuple[int, ...]) -> Tensor: - """Replicates a tensor with given multiples times.""" - tile_op = op.Tile() - outputs = tile_op(inputs, shape) - return outputs - - -def reshape(inputs: Tensor, shape: _Shape) -> Tensor: - """Reshapes input tensor with the same values based on a given shape tuple.""" - if isinstance(shape, int): - shape = (shape,) - return op.Reshape()(inputs, shape) - - -def zeros(shape: _Shape, dtype: mindspore.dtype = None) -> Tensor: - """Return a new array of given shape and type, filled with zeros.""" - outputs = np.zeros(shape) - return mindspore.Tensor(outputs, dtype=dtype) - - -def zeros_like(inputs: Tensor, dtype: mindspore.dtype = None) -> Tensor: - """Return an array of zeros with the same shape and type as a given array.""" - inputs_np = inputs.asnumpy() - outputs_np = np.zeros_like(inputs_np) - outputs = mindspore.Tensor(outputs_np, dtype) - return outputs - - -def random(shape: _Shape, dtype: mindspore.dtype = None) -> Tensor: - """Return random floats in the half-open interval [0.0, 1.0).""" - outputs_np = np.random.random(shape) - outputs = mindspore.Tensor(outputs_np, dtype) - return outputs - - -def randint(low: int, high: int, shape: _Shape, dtype: mindspore.dtype = mindspore.int8) -> Tensor: - """Return random integers from `low` (inclusive) to `high` (exclusive).""" - outputs_np = np.random.randint(low, high, size=shape) - outputs = mindspore.Tensor(outputs_np, dtype=dtype) - return outputs - - -def softmax(axis: int = -1) -> Callable: - """Softmax activation function.""" - func = nn.Softmax(axis=axis) - return func - - -def summation(inputs: Tensor, axis: _Axis = (), keep_dims: bool = False) -> Tensor: - """Reduces a dimension of a tensor by summing all elements in the dimension.""" - sum_op = op.ReduceSum(keep_dims) - outputs = sum_op(inputs, axis) - return outputs - - -def stack(inputs: List[Tensor], axis: int) -> Tensor: - """Stacks a list of tensors in specified axis.""" - stack_op = op.Stack(axis) - outputs = stack_op(inputs) - return outputs - - -def sqrt(inputs: Tensor) -> Tensor: - """Returns square root of a tensor element-wise.""" - sqrt_op = op.Sqrt() - return sqrt_op(inputs) diff --git a/mindspore/explainer/_utils.py b/mindspore/explainer/_utils.py deleted file mode 100644 index 2ee5920cfee..00000000000 --- a/mindspore/explainer/_utils.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Utils for MindExplain""" - -__all__ = [ - 'ForwardProbe', - 'abs_max', - 'calc_auc', - 'calc_correlation', - 'deprecated_error', - 'format_tensor_to_ndarray', - 'generate_one_hot', - 'rank_pixels', - 'resize', - 'retrieve_layer_by_name', - 'retrieve_layer', - 'unify_inputs', - 'unify_targets' -] - -from typing import Tuple, Union - -import numpy as np -from PIL import Image - -import mindspore as ms -import mindspore.nn as nn -import mindspore.ops.operations as op - -_Array = np.ndarray -_Module = nn.Cell -_Tensor = ms.Tensor - - -class DeprecatedError(RuntimeError): - def __init__(self): - super().__init__("'mindspore.explainer' is deprecated from version 1.5 and " - "will be removed in a future version, use MindSpore XAI " - "https://gitee.com/mindspore/xai instead.") - - -def deprecated_error(func_or_cls): - del func_or_cls - raise DeprecatedError() - - -def abs_max(gradients): - """ - Transform gradients to saliency through abs then take max along channels. - - Args: - gradients (_Tensor): Gradients which will be transformed to saliency map. - - Returns: - _Tensor, saliency map integrated from gradients. - """ - gradients = op.Abs()(gradients) - saliency = op.ReduceMax(keep_dims=True)(gradients, axis=1) - return saliency - - -def generate_one_hot(indices, depth): - r""" - Simple wrap of OneHot operation, the on_value an off_value are fixed to 1.0 - and 0.0. - """ - on_value = ms.Tensor(1.0, ms.float32) - off_value = ms.Tensor(0.0, ms.float32) - weights = op.OneHot()(indices, depth, on_value, off_value) - return weights - - -def unify_inputs(inputs) -> tuple: - """Unify inputs of explainer.""" - if isinstance(inputs, tuple): - return inputs - if isinstance(inputs, ms.Tensor): - inputs = (inputs,) - elif isinstance(inputs, np.ndarray): - inputs = (ms.Tensor(inputs),) - else: - raise TypeError( - 'inputs must be one of [tuple, ms.Tensor or np.ndarray], ' - 'but get {}'.format(type(inputs))) - return inputs - - -def unify_targets(targets) -> ms.Tensor: - """Unify targets labels of explainer.""" - if isinstance(targets, ms.Tensor): - return targets - if isinstance(targets, list): - targets = ms.Tensor(targets, dtype=ms.int32) - if isinstance(targets, int): - targets = ms.Tensor([targets], dtype=ms.int32) - else: - raise TypeError( - 'targets must be one of [int, list or ms.Tensor], ' - 'but get {}'.format(type(targets))) - return targets - - -def retrieve_layer_by_name(model: _Module, layer_name: str): - """ - Retrieve the layer in the model by the given layer_name. - - Args: - model (Cell): Model which contains the target layer. - layer_name (str): Name of target layer. - - Returns: - Cell, the target layer. - - Raises: - ValueError: If module with given layer_name is not found in the model. - """ - if not isinstance(layer_name, str): - raise TypeError('layer_name should be type of str, but receive {}.' - .format(type(layer_name))) - - if not layer_name: - return model - - target_layer = None - for name, cell in model.cells_and_names(): - if name == layer_name: - target_layer = cell - return target_layer - - if target_layer is None: - raise ValueError( - 'Cannot match {}, please provide target layer' - 'in the given model.'.format(layer_name)) - return None - - -def retrieve_layer(model: _Module, target_layer: Union[str, _Module] = ''): - """ - Retrieve the layer in the model. 
- - 'target' can be either a layer name or a Cell object. Given the layer name, - the method will search thourgh the model and return the matched layer. If a - Cell object is provided, it will check whether the given layer exists - in the model. If target layer is not found in the model, ValueError will - be raised. - - Args: - model (Cell): Model which contains the target layer. - target_layer (str, Cell): Name of target layer or the target layer instance. - - Returns: - Cell, the target layer. - - Raises: - ValueError: If module with given layer_name is not found in the model. - """ - if isinstance(target_layer, str): - target_layer = retrieve_layer_by_name(model, target_layer) - return target_layer - - if isinstance(target_layer, _Module): - for _, cell in model.cells_and_names(): - if target_layer is cell: - return target_layer - raise ValueError( - 'Model not contain cell {}, fail to probe.'.format(target_layer) - ) - raise TypeError('layer_name must have type of str or ms.nn.Cell,' - 'but receive {}'.format(type(target_layer))) - - -class ForwardProbe: - """ - Probe to capture output of specific layer in a given model. - - Args: - target_layer (str, Cell): Name of target layer or the target layer instance. - """ - - def __init__(self, target_layer: _Module): - self._target_layer = target_layer - self._original_construct = self._target_layer.construct - self._intermediate_tensor = None - - @property - def value(self): - """Obtain the intermediate tensor.""" - return self._intermediate_tensor - - def __enter__(self): - self._target_layer.construct = self._new_construct - return self - - def __exit__(self, *_): - self._target_layer.construct = self._original_construct - self._intermediate_tensor = None - return False - - def _new_construct(self, *inputs): - outputs = self._original_construct(*inputs) - self._intermediate_tensor = outputs - return outputs - - -def format_tensor_to_ndarray(x: Union[ms.Tensor, np.ndarray]) -> np.ndarray: - """Unify Tensor and numpy.array to numpy.array.""" - if isinstance(x, ms.Tensor): - x = x.asnumpy() - - if not isinstance(x, np.ndarray): - raise TypeError('input should be one of [ms.Tensor or np.ndarray],' - ' but receive {}'.format(type(x))) - return x - - -def calc_correlation(x: Union[ms.Tensor, np.ndarray], - y: Union[ms.Tensor, np.ndarray]) -> float: - """Calculate Pearson correlation coefficient between two vectors.""" - x = format_tensor_to_ndarray(x) - y = format_tensor_to_ndarray(y) - - if len(x.shape) > 1 or len(y.shape) > 1: - raise ValueError('"calc_correlation" only support 1-dim vectors currently, but get shape {} and {}.' - .format(len(x.shape), len(y.shape))) - - if np.all(x == 0) or np.all(y == 0): - return np.float(0) - faithfulness = np.corrcoef(x, y)[0, 1] - return faithfulness - - -def calc_auc(x: _Array) -> _Array: - """Calculate the Area under Curve.""" - # take mean for multiple patches if the model is fully convolutional model - if len(x.shape) == 4: - x = np.mean(np.mean(x, axis=2), axis=3) - auc = (x.sum() - x[0] - x[-1]) / len(x) - return auc - - -def rank_pixels(inputs: _Array, descending: bool = True) -> _Array: - """ - Generate rank order for every pixel in an 2D array. - - The rank order start from 0 to (num_pixel-1). If descending is True, the - rank order will generate in a descending order, otherwise in ascending - order. 
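A worked numpy example of the ranking described here; the 2x2 saliency map is arbitrary:

import numpy as np

saliency = np.array([[[0.4, 0.1],
                      [0.9, 0.6]]])             # batch of one 2x2 map
flat = saliency.reshape(saliency.shape[0], -1)
order = np.argsort(-flat, axis=1)               # descending rank order
ranks = np.zeros_like(order)
ranks[0][order[0]] = np.arange(flat.shape[1])
print(ranks.reshape(saliency.shape))
# [[[2 3]
#   [0 1]]]   the largest value (0.9) receives rank 0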
- """ - if len(inputs.shape) < 2 or len(inputs.shape) > 3: - raise ValueError('Only support 2D or 3D inputs currently.') - - batch_size = inputs.shape[0] - flatten_saliency = inputs.reshape(batch_size, -1) - factor = -1 if descending else 1 - sorted_arg = np.argsort(factor * flatten_saliency, axis=1) - flatten_rank = np.zeros_like(sorted_arg) - arange = np.arange(flatten_saliency.shape[1]) - for i in range(batch_size): - flatten_rank[i][sorted_arg[i]] = arange - rank_map = flatten_rank.reshape(inputs.shape) - return rank_map - - -def resize(inputs: _Tensor, size: Tuple[int, int], mode: str) -> _Tensor: - """ - Resize the intermediate layer _attribution to the same size as inputs. - - Args: - inputs (Tensor): The input tensor to be resized. - size (tuple[int]): The targeted size resize to. - mode (str): The resize mode. Options: 'nearest_neighbor', 'bilinear'. - - Returns: - Tensor, the resized tensor. - - Raises: - ValueError: the resize mode is not in ['nearest_neighbor', 'bilinear']. - """ - h, w = size - if mode == 'nearest_neighbor': - resize_nn = op.ResizeNearestNeighbor((h, w)) - outputs = resize_nn(inputs) - - elif mode == 'bilinear': - inputs_np = inputs.asnumpy() - inputs_np = np.transpose(inputs_np, [0, 2, 3, 1]) - array_lst = [] - for inp in inputs_np: - array = (np.repeat(inp, 3, axis=2) * 255).astype(np.uint8) - image = Image.fromarray(array) - image = image.resize(size, resample=Image.BILINEAR) - array = np.asarray(image).astype(np.float32) / 255 - array_lst.append(array[:, :, 0:1]) - - resized_np = np.transpose(array_lst, [0, 3, 1, 2]) - outputs = ms.Tensor(resized_np, inputs.dtype) - else: - raise ValueError('Unsupported resize mode {}.'.format(mode)) - - return outputs diff --git a/mindspore/explainer/benchmark/__init__.py b/mindspore/explainer/benchmark/__init__.py deleted file mode 100644 index 3910488d643..00000000000 --- a/mindspore/explainer/benchmark/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Predefined XAI metrics.""" - -from ._attribution.class_sensitivity import ClassSensitivity -from ._attribution.faithfulness import Faithfulness -from ._attribution.localization import Localization -from ._attribution.robustness import Robustness - -__all__ = [ - "ClassSensitivity", - "Faithfulness", - "Localization", - "Robustness" -] diff --git a/mindspore/explainer/benchmark/_attribution/__init__.py b/mindspore/explainer/benchmark/_attribution/__init__.py deleted file mode 100644 index a9b9f6bd2be..00000000000 --- a/mindspore/explainer/benchmark/_attribution/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Predefined XAI metrics"""
diff --git a/mindspore/explainer/benchmark/_attribution/class_sensitivity.py b/mindspore/explainer/benchmark/_attribution/class_sensitivity.py
deleted file mode 100644
index 69be12ec9d7..00000000000
--- a/mindspore/explainer/benchmark/_attribution/class_sensitivity.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Class Sensitivity."""
-
-import numpy as np
-
-from mindspore.explainer.explanation import RISE
-from .metric import LabelAgnosticMetric
-from ... import _operators as ops
-from ..._utils import calc_correlation, deprecated_error
-
-
-@deprecated_error
-class ClassSensitivity(LabelAgnosticMetric):
-    """
-    Class sensitivity metric used to evaluate attribution-based explanations.
-
-    Reasonable attribution-based explainers are expected to generate distinct saliency maps for different labels,
-    especially for the labels of highest and lowest confidence. ClassSensitivity evaluates the explainer by
-    computing the correlation between the saliency maps of the highest-confidence and lowest-confidence labels.
-    An explainer with better class sensitivity receives a lower correlation score. To make the evaluation results
-    intuitive, the returned score is the negated correlation, normalized to the range [0, 1].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-    """
-
-    def evaluate(self, explainer, inputs):
-        """
-        Evaluate class sensitivity on a single data sample.
-
-        Args:
-            explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
-            inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
-
-        Returns:
-            numpy.ndarray, 1D array of shape :math:`(N,)`, result of class sensitivity evaluated on `explainer`.
-
-        Raises:
-            TypeError: Be raised for any argument type problem.
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore as ms
-            >>> from mindspore.explainer.benchmark import ClassSensitivity
-            >>> from mindspore.explainer.explanation import Gradient
-            >>> from mindspore import context
-            >>>
-            >>> context.set_context(mode=context.PYNATIVE_MODE)
-            >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
-            >>> net = LeNet5(10, num_channel=3)
-            >>> # prepare your explainer to be evaluated, e.g., Gradient.
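-            >>> # (LeNet5 is assumed to be defined or imported beforehand; any
-            >>> # classifier taking 3x32x32 inputs with 10 output classes works here.)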
-            >>> gradient = Gradient(net)
-            >>> input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
-            >>> class_sensitivity = ClassSensitivity()
-            >>> res = class_sensitivity.evaluate(gradient, input_x)
-            >>> print(res.shape)
-            (1,)
-        """
-        self._check_evaluate_param(explainer, inputs)
-
-        outputs = explainer.network(inputs)
-
-        max_confidence_label = ops.argmax(outputs)
-        min_confidence_label = ops.argmin(outputs)
-        if isinstance(explainer, RISE):
-            labels = ops.stack([max_confidence_label, min_confidence_label], axis=1)
-            full_saliency = explainer(inputs, labels)
-            max_confidence_saliency = full_saliency[:, max_confidence_label].asnumpy()
-            min_confidence_saliency = full_saliency[:, min_confidence_label].asnumpy()
-        else:
-            max_confidence_saliency = explainer(inputs, max_confidence_label).asnumpy()
-            min_confidence_saliency = explainer(inputs, min_confidence_label).asnumpy()
-
-        correlations = []
-        for i in range(inputs.shape[0]):
-            correlation = calc_correlation(max_confidence_saliency[i].reshape(-1),
-                                           min_confidence_saliency[i].reshape(-1))
-            normalized_correlation = (-correlation + 1) / 2
-            correlations.append(normalized_correlation)
-        return np.array(correlations, float)
diff --git a/mindspore/explainer/benchmark/_attribution/faithfulness.py b/mindspore/explainer/benchmark/_attribution/faithfulness.py
deleted file mode 100644
index 8309b96818a..00000000000
--- a/mindspore/explainer/benchmark/_attribution/faithfulness.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Faithfulness."""
-from decimal import Decimal
-from typing import Callable, Optional, Union
-
-import numpy as np
-
-import mindspore as ms
-from mindspore import log, nn
-from mindspore.train._utils import check_value_type
-from .metric import LabelSensitiveMetric
-from ..._utils import calc_auc, deprecated_error, format_tensor_to_ndarray
-from ...explanation._attribution import Attribution as _Attribution
-from ...explanation._attribution._perturbation.replacement import Constant, GaussianBlur
-from ...explanation._attribution._perturbation.ablation import AblationWithSaliency
-
-_Array = np.ndarray
-_Explainer = Union[_Attribution, Callable]
-_Label = Union[int, ms.Tensor]
-_Module = nn.Cell
-
-
-def _calc_feature_importance(saliency: _Array, masks: _Array) -> _Array:
-    """Calculate feature importance w.r.t. the given masks."""
-    if saliency.shape[1] < masks.shape[2]:
-        saliency = np.repeat(saliency, repeats=masks.shape[2], axis=1)
-
-    batch_size = masks.shape[0]
-    num_perturbations = masks.shape[1]
-    saliency = np.repeat(saliency, repeats=num_perturbations, axis=0)
-    saliency = saliency.reshape([batch_size, num_perturbations, -1])
-    masks = masks.reshape([batch_size, num_perturbations, -1])
-    feature_importance = saliency * masks
-    feature_importance = feature_importance.sum(-1) / masks.sum(-1)
-    return feature_importance
-
-
-class _FaithfulnessHelper:
-    """Base class for faithfulness calculators."""
-    _support = [Constant, GaussianBlur]
-
-    def __init__(self,
-                 perturb_percent: float,
-                 perturb_mode: str,
-                 perturb_method: str,
-                 is_accumulate: bool,
-                 perturb_pixel_per_step: Optional[int] = None,
-                 num_perturbations: Optional[int] = None,
-                 **kwargs):
-
-        self._get_reference = None
-        for method in self._support:
-            if perturb_method == method.__name__:
-                self._get_reference = method(**kwargs)
-        if self._get_reference is None:
-            raise ValueError(
-                'The param "perturb_method" should be one of {}.'.format([x.__name__ for x in self._support]))
-
-        self._ablation = AblationWithSaliency(perturb_mode=perturb_mode,
-                                              perturb_percent=perturb_percent,
-                                              perturb_pixel_per_step=perturb_pixel_per_step,
-                                              num_perturbations=num_perturbations,
-                                              is_accumulate=is_accumulate)
-
-    def calc_faithfulness(self, inputs, model, targets, saliency):
-        """Calc faithfulness."""
-        raise NotImplementedError
-
-
-class NaiveFaithfulness(_FaithfulnessHelper):
-    """
-    Calculator for naive faithfulness.
-
-    For naive faithfulness, the metric replaces several pixels on the original
-    image by a specific method for each perturbation. The metric predicts on
-    the perturbed images and records a series of probabilities, then
-    calculates the correlation between the probability distribution and the
-    averaged feature importance. Higher correlation indicates better
-    faithfulness.
-
-    Args:
-        perturb_percent (float): percentage of pixels to perturb.
-        perturb_method (str): specify the method to replace the pixel.
-            Current support: ['Constant', 'GaussianBlur']
-        is_accumulate (bool): whether to accumulate the former perturbations to
-            the later perturbations.
-            Default: False.
-        perturb_pixel_per_step (Optional[int]): number of pixels to perturb
-            for each perturbation. If perturb_pixel_per_step is None, the actual
-            perturb_pixel_per_step will be calculated as:
-            num_image_pixel * perturb_percent / num_perturb_steps.
-            Default: None
-        num_perturbations (Optional[int]): number of perturbations. If
-            num_perturbations is None, it will be calculated as:
-            num_image_pixel * perturb_percent / perturb_pixel_per_step.
-            Default: None
-        kwargs: specific perturb_method will require
-            different arguments. Below lists required args for each method.
-
-            'Constant': base_value (int)
-            'GaussianBlur': sigma (float): 0.7
-
-    Raises:
-        ValueError: Be raised for any argument value problem.
-    """
-
-    def __init__(self,
-                 perturb_percent: float,
-                 perturb_method: str,
-                 is_accumulate: bool = False,
-                 perturb_pixel_per_step: Optional[int] = None,
-                 num_perturbations: Optional[int] = None,
-                 **kwargs):
-        super().__init__(perturb_percent=perturb_percent,
-                         perturb_mode='Deletion',
-                         perturb_method=perturb_method,
-                         is_accumulate=is_accumulate,
-                         perturb_pixel_per_step=perturb_pixel_per_step,
-                         num_perturbations=num_perturbations,
-                         **kwargs)
-
-    def calc_faithfulness(self,
-                          inputs: _Array,
-                          model: _Module,
-                          targets: _Label,
-                          saliency: _Array) -> np.ndarray:
-        """
-        Calculate naive faithfulness.
-
-        Args:
-            inputs (_Array): sample to calculate the faithfulness score on.
-            model (_Module): the model to explain.
-            targets (_Label): the label to explain on.
-            saliency (_Array): Saliency map of the given inputs and targets from the
-                explainer.
-
-        Return:
-            - faithfulness (np.ndarray): faithfulness score
-
-        """
-        if Decimal(str(saliency.max())) == Decimal(str(saliency.min())):
-            log.warning("The saliency map is uniform everywhere. The correlation will be set to zero.")
-            correlation = 0
-            return np.array([correlation], float)
-
-        batch_size = inputs.shape[0]
-        reference = self._get_reference(inputs)
-        masks = self._ablation.generate_mask(saliency, inputs.shape[1])
-        perturbations = self._ablation(inputs, reference, masks)
-        feature_importance = _calc_feature_importance(saliency, masks)
-
-        perturbations = perturbations.reshape(-1, *perturbations.shape[2:])
-        perturbations = ms.Tensor(perturbations, dtype=ms.float32)
-        predictions = model(perturbations)[:, targets].asnumpy()
-        predictions = predictions.reshape(*feature_importance.shape)
-
-        if Decimal(str(predictions.max())) == Decimal(str(predictions.min())):
-            log.warning("The perturbations do not affect the predictions. The correlation will be set to zero.")
-            correlation = 0
-            return np.array([correlation], float)
-
-        faithfulness = -np.corrcoef(feature_importance, predictions)
-        faithfulness = np.diag(faithfulness[:batch_size, batch_size:])
-        return faithfulness
-
-
-class DeletionAUC(_FaithfulnessHelper):
-    """ Calculator for deletion AUC.
-
-    For deletion AUC, the metric accumulatively replaces pixels on the original
-    image through the specified 'perturb_method', predicts on the perturbed
-    images and records a series of probabilities. The metric then calculates the
-    AUC of the probability variation curve during perturbations. Faithfulness
-    is defined as (1 - deletion_AUC). A higher score indicates better
-    faithfulness of the explanation.
-
-    Args:
-        perturb_percent (float): percentage of pixels to perturb.
-        perturb_method (str): specify the method to replace the pixel.
-            Current support: ['Constant', 'GaussianBlur']
-        perturb_pixel_per_step (Optional[int]): number of pixels to perturb
-            for each perturbation. If perturb_pixel_per_step is None, the actual
-            perturb_pixel_per_step will be calculated as:
-            num_image_pixel * perturb_percent / num_perturb_steps.
-            Default: None
-        num_perturbations (Optional[int]): number of perturbations. If
-            num_perturbations is None, it will be calculated as:
-            num_image_pixel * perturb_percent / perturb_pixel_per_step.
-            Default: None
-        kwargs: specific perturb_method will require
-            different arguments. Below lists required args for each method.
-
-            'Constant': base_value (int)
-            'GaussianBlur': sigma (float): 0.7
-
-    Raises:
-        ValueError: Be raised for any argument value problem.
-    """
-
-    def __init__(self,
-                 perturb_percent: float,
-                 perturb_method: str,
-                 perturb_pixel_per_step: Optional[int] = None,
-                 num_perturbations: Optional[int] = None,
-                 **kwargs):
-        super().__init__(perturb_percent=perturb_percent,
-                         perturb_mode='Deletion',
-                         perturb_method=perturb_method,
-                         perturb_pixel_per_step=perturb_pixel_per_step,
-                         num_perturbations=num_perturbations,
-                         is_accumulate=True,
-                         **kwargs)
-
-    def calc_faithfulness(self,
-                          inputs: _Array,
-                          model: _Module,
-                          targets: _Label,
-                          saliency: _Array) -> _Array:
-        """
-        Calculate faithfulness through deletion AUC.
-
-        Args:
-            inputs (_Array): sample to calculate the faithfulness score on.
-            model (_Module): the model to explain.
-            targets (_Label): the label to explain on.
-            saliency (_Array): Saliency map of the given inputs and targets from the
-                explainer.
-
-        Return:
-            - faithfulness (float): faithfulness score
-
-        """
-        reference = self._get_reference(inputs)
-        masks = self._ablation.generate_mask(saliency, inputs.shape[1])
-        perturbations = self._ablation(inputs, reference, masks)
-        perturbations = perturbations.reshape(-1, *perturbations.shape[2:])
-        perturbations = ms.Tensor(perturbations, dtype=ms.float32)
-        predictions = model(perturbations).asnumpy()[:, targets]
-        predictions = predictions.reshape((inputs.shape[0], -1))
-        input_tensor = ms.Tensor(inputs, ms.float32)
-        original_output = model(input_tensor).asnumpy()[:, targets]
-
-        auc = calc_auc(original_output.squeeze() - predictions.squeeze())
-        return np.array([1 - auc], float)
-
-
-class InsertionAUC(_FaithfulnessHelper):
-    """ Calculator for insertion AUC.
-
-    For insertion AUC, the metric accumulatively replaces pixels of a reference
-    image with pixels from the original image, like inserting pixels from the
-    original image into the reference. The reference is generated through the
-    specified 'perturb_method'. The metric predicts on the perturbed images and
-    records a series of probabilities. The metric then calculates the AUC of
-    the probability variation curve during perturbations. Faithfulness is
-    defined as the insertion AUC. A higher score indicates better faithfulness
-    of the explanation.
-
-    Args:
-        perturb_percent (float): percentage of pixels to perturb.
-        perturb_method (str): specify the method to replace the pixel.
-            Current support: ['Constant', 'GaussianBlur']
-        perturb_pixel_per_step (Optional[int]): number of pixels to perturb
-            for each perturbation. If perturb_pixel_per_step is None, the actual
-            perturb_pixel_per_step will be calculated as:
-            num_image_pixel * perturb_percent / num_perturb_steps.
-            Default: None
-        num_perturbations (Optional[int]): number of perturbations. If
-            num_perturbations is None, it will be calculated as:
-            num_image_pixel * perturb_percent / perturb_pixel_per_step.
-            Default: None
-        kwargs: specific perturb_method will require
-            different arguments. Below lists required args for each method.
-
-            'Constant': base_value (int)
-            'GaussianBlur': sigma (float): 0.7
-
-    Raises:
-        ValueError: Be raised for any argument value problem.
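-
-    Note:
-        Unlike "DeletionAUC", the insertion AUC is returned as-is rather than as
-        (1 - AUC), since a larger area under the insertion curve already
-        indicates better faithfulness (see `calc_faithfulness` below).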
- """ - - def __init__(self, - perturb_percent: float, - perturb_method: str, - perturb_pixel_per_step: Optional[int] = None, - num_perturbations: Optional[int] = None, - **kwargs): - super().__init__(perturb_percent=perturb_percent, - perturb_mode='Insertion', - perturb_method=perturb_method, - perturb_pixel_per_step=perturb_pixel_per_step, - num_perturbations=num_perturbations, - is_accumulate=True, - **kwargs) - - def calc_faithfulness(self, - inputs: _Array, - model: _Module, - targets: _Label, - saliency: _Array) -> _Array: - """ - Calculate faithfulness through insertion AUC. - - Args: - inputs (_Array): sample to calculate faithfulness score - model (_Module): model to explanation - targets (_Label): label to explanation on. - saliency (_Array): Saliency map of given inputs and targets from the - explainer. - - Return: - - faithfulness (float): faithfulness score - """ - reference = self._get_reference(inputs) - masks = self._ablation.generate_mask(saliency, inputs.shape[1]) - perturbations = self._ablation(inputs, reference, masks) - perturbations = perturbations.reshape(-1, *perturbations.shape[2:]) - perturbations = ms.Tensor(perturbations, dtype=ms.float32) - predictions = model(perturbations).asnumpy()[:, targets] - predictions = predictions.reshape((inputs.shape[0], -1)) - - base_tensor = ms.Tensor(reference, ms.float32) - base_outputs = model(base_tensor).asnumpy()[:, targets] - - auc = calc_auc(predictions.squeeze() - base_outputs.squeeze()) - return np.array([auc], np.float) - - -@deprecated_error -class Faithfulness(LabelSensitiveMetric): - """ - Provides evaluation on faithfulness on XAI explanations. - - Three specific metrics to obtain quantified results are supported: "NaiveFaithfulness", "DeletionAUC", and - "InsertionAUC". - - For metric "NaiveFaithfulness", a series of perturbed images are created by modifying pixels - on original image. Then the perturbed images will be fed to the model and a series of output probability drops can - be obtained. The faithfulness is then quantified as the correlation between the propability drops and the saliency - map values on the same pixels (we normalize the correlation further to make them in range of [0, 1]). - - For metric "DeletionAUC", a series of perturbed images are created by accumulatively modifying pixels of the - original image to a base value (e.g. a constant). The perturbation starts from pixels with high saliency values - to pixels with low saliency values. Feeding the perturbed images into the model in order, an output probability - drop curve can be obtained. "DeletionAUC" is then obtained as the area under this probability drop curve. - - For metric "InsertionAUC", a series of perturbed images are created by accumulatively inserting pixels of the - original image to a reference image (e.g. a black image). The insertion starts from pixels with high saliency - values to pixels with low saliency values. Feeding the perturbed images into the model in order, an output - probability increase curve can be obtained. "InsertionAUC" is then obtained as the area under this curve. - - For all the three metrics, higher value indicates better faithfulness. - - Args: - num_labels (int): Number of labels. - activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For - single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification - tasks, `nn.Sigmoid` is usually be applied. 
-            Users can also pass their own customized `activation_fn` as long
-            as, when combined with the network, the final output is the probability of the input.
-        metric (str, optional): The specific metric to quantify faithfulness.
-            Options: "DeletionAUC", "InsertionAUC", "NaiveFaithfulness".
-            Default: 'NaiveFaithfulness'.
-
-    Raises:
-        TypeError: Be raised for any argument type problem.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-    """
-    _methods = [NaiveFaithfulness, DeletionAUC, InsertionAUC]
-
-    def __init__(self, num_labels, activation_fn, metric="NaiveFaithfulness"):
-        super(Faithfulness, self).__init__(num_labels)
-
-        perturb_percent = 0.5  # ratio of pixels to be perturbed, future argument
-        perturb_method = "Constant"  # perturbation method, all the perturbed pixels will be set to constant
-        base_value = 0.0  # the pixel value set for the perturbed pixels
-
-        check_value_type("activation_fn", activation_fn, nn.Cell)
-        self._activation_fn = activation_fn
-
-        self._verify_metrics(metric)
-        for method in self._methods:
-            if metric == method.__name__:
-                self._faithfulness_helper = method(
-                    perturb_percent=perturb_percent,
-                    perturb_method=perturb_method,
-                    base_value=base_value
-                )
-
-    def evaluate(self, explainer, inputs, targets, saliency=None):
-        """
-        Evaluate faithfulness on a single data sample.
-
-        Note:
-            Currently only single sample (:math:`N=1`) at each call is supported.
-
-        Args:
-            explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
-            inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
-            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
-                If `targets` is a 1D tensor, its length should be the same as `inputs`.
-            saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`.
-                If it is None, the parsed `explainer` will generate the saliency map with `inputs` and `targets` and
-                continue the evaluation. Default: None.
-
-        Returns:
-            numpy.ndarray, 1D array of shape :math:`(N,)`, result of faithfulness evaluated on `explainer`.
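-
-        Raises:
-            TypeError: Be raised for any argument type problem.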
- - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore import nn - >>> from mindspore.explainer.benchmark import Faithfulness - >>> from mindspore.explainer.explanation import Gradient - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # init a `Faithfulness` object - >>> num_labels = 10 - >>> metric = "InsertionAUC" - >>> activation_fn = nn.Softmax() - >>> faithfulness = Faithfulness(num_labels, activation_fn, metric) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> gradient = Gradient(net) - >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> targets = 5 - >>> # usage 1: input the explainer and the data to be explained, - >>> # faithfulness is a Faithfulness instance - >>> res = faithfulness.evaluate(gradient, inputs, targets) - >>> print(res.shape) - (1,) - >>> # usage 2: input the generated saliency map - >>> saliency = gradient(inputs, targets) - >>> res = faithfulness.evaluate(gradient, inputs, targets, saliency) - >>> print(res.shape) - (1,) - """ - - self._check_evaluate_param(explainer, inputs, targets, saliency) - - if saliency is None: - saliency = explainer(inputs, targets) - - inputs = format_tensor_to_ndarray(inputs) - saliency = format_tensor_to_ndarray(saliency) - - full_network = nn.SequentialCell([explainer.network, self._activation_fn]) - faithfulness = self._faithfulness_helper.calc_faithfulness(inputs=inputs, model=full_network, - targets=targets, saliency=saliency) - return (1 + faithfulness) / 2 - - def _verify_metrics(self, metric: str): - supports = [x.__name__ for x in self._methods] - if metric not in supports: - raise ValueError("Metric should be one of {}.".format(supports)) diff --git a/mindspore/explainer/benchmark/_attribution/localization.py b/mindspore/explainer/benchmark/_attribution/localization.py deleted file mode 100644 index c65264f447e..00000000000 --- a/mindspore/explainer/benchmark/_attribution/localization.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================
-"""Localization metrics."""
-import numpy as np
-
-from mindspore.train._utils import check_value_type
-from .metric import LabelSensitiveMetric
-from ..._operators import maximum, reshape, Tensor
-from ..._utils import deprecated_error, format_tensor_to_ndarray
-
-
-def _get_max_position(saliency):
-    """Get the (row, column) position of the max pixel of the saliency map."""
-    saliency = saliency.asnumpy()
-    w = saliency.shape[3]
-    saliency = np.reshape(saliency, (len(saliency), -1))
-    max_arg = np.argmax(saliency, axis=1)
-    return max_arg // w, max_arg - (max_arg // w) * w
-
-
-def _mask_out_saliency(saliency, threshold):
-    """Keep the parts of the saliency map whose values are greater than the threshold."""
-    max_value = maximum(saliency)
-    mask_out = saliency > (reshape(max_value, (len(saliency), -1, 1, 1)) * threshold)
-    return mask_out
-
-
-@deprecated_error
-class Localization(LabelSensitiveMetric):
-    r"""
-    Provides evaluation on the localization capability of XAI methods.
-
-    Two specific metrics to obtain quantified results are supported: "PointingGame" and "IoSR"
-    (Intersection over Salient Region).
-
-    For metric "PointingGame", the localization capability is calculated as the ratio of data in which the max position
-    of their saliency maps lies within the bounding boxes. Specifically, for a single datum, given the saliency map and
-    its bounding box, if the max point of its saliency map lies within the bounding box, the evaluation result is 1,
-    otherwise 0.
-
-    For metric "IoSR" (Intersection over Salient Region), the localization capability is calculated as the intersection
-    of the bounding box and the salient region over the area of the salient region. The salient region is defined as
-    the region whose value exceeds :math:`\theta * \max{saliency}`.
-
-    Args:
-        num_labels (int): Number of classes in the dataset.
-        metric (str, optional): Specific metric to calculate localization capability.
-            Options: "PointingGame", "IoSR". Default: "PointingGame".
-
-    Raises:
-        TypeError: Be raised for any argument type problem.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-    """
-
-    def __init__(self,
-                 num_labels,
-                 metric="PointingGame"
-                 ):
-        super(Localization, self).__init__(num_labels)
-        self._verify_metrics(metric)
-        self._metric = metric
-
-        # Arg for the specific metric: for "PointingGame" it is an integer
-        # indicating the tolerance in pixels (default 15), while for "IoSR" it
-        # is a float indicating the threshold to choose the salient region
-        # (default 0.5).
-        if self._metric == "PointingGame":
-            self._metric_arg = 15
-        else:
-            self._metric_arg = 0.5
-
-    @staticmethod
-    def _verify_metrics(metric):
-        """Verify the user defined metric."""
-        supports = ["PointingGame", "IoSR"]
-        if metric not in supports:
-            raise ValueError("Metric should be one of {}".format(supports))
-
-    def evaluate(self, explainer, inputs, targets, saliency=None, mask=None):
-        """
-        Evaluate localization on a single data sample.
-
-        Note:
-            Currently only single sample (:math:`N=1`) at each call is supported.
-
-        Args:
-            explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
-            inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
-            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
-                If `targets` is a 1D tensor, its length should be the same as `inputs`.
- saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`. - If it is None, the parsed `explainer` will generate the saliency map with `inputs` and `targets` and - continue the evaluation. Default: None. - mask (Tensor, numpy.ndarray): Ground truth bounding box/masks for the inputs w.r.t targets, a 4D tensor - or numpy.ndarray of shape :math:`(N, 1, H, W)`. - - Returns: - numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`. - - Raises: - ValueError: Be raised for any argument value problem. - - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import Gradient - >>> from mindspore.explainer.benchmark import Localization - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> num_labels = 10 - >>> localization = Localization(num_labels, "PointingGame") - >>> - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> gradient = Gradient(net) - >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> masks = np.zeros([1, 1, 32, 32]) - >>> masks[:, :, 10: 20, 10: 20] = 1 - >>> targets = 5 - >>> # usage 1: input the explainer and the data to be explained, - >>> # localization is a Localization instance - >>> res = localization.evaluate(gradient, inputs, targets, mask=masks) - >>> print(res.shape) - (1,) - >>> # usage 2: input the generated saliency map - >>> saliency = gradient(inputs, targets) - >>> res = localization.evaluate(gradient, inputs, targets, saliency, mask=masks) - >>> print(res.shape) - (1,) - """ - self._check_evaluate_param_with_mask(explainer, inputs, targets, saliency, mask) - - mask_np = format_tensor_to_ndarray(mask)[0] - - if saliency is None: - saliency = explainer(inputs, targets) - - if self._metric == "PointingGame": - point = _get_max_position(saliency) - - x, y = np.meshgrid( - (np.arange(mask_np.shape[1]) - point[0]) ** 2, - (np.arange(mask_np.shape[2]) - point[1]) ** 2) - max_region = (x + y) < self._metric_arg ** 2 - - # if max_region has overlap with mask_np return 1 otherwise 0. 
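-            # max_region is a boolean disk of radius self._metric_arg (the
-            # pointing-game tolerance, in pixels) centred at the saliency peak.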
-            result = 1 if (mask_np.astype(bool) & max_region).any() else 0
-
-        elif self._metric == "IoSR":
-            mask_out_np = format_tensor_to_ndarray(_mask_out_saliency(saliency, self._metric_arg))
-            overlap = np.sum(mask_np.astype(bool) & mask_out_np.astype(bool))
-            saliency_area = np.sum(mask_out_np)
-            result = overlap / saliency_area.clip(min=1e-10)
-        return np.array([result], float)
-
-    def _check_evaluate_param_with_mask(self, explainer, inputs, targets, saliency, mask):
-        self._check_evaluate_param(explainer, inputs, targets, saliency)
-        if len(inputs.shape) != 4:
-            raise ValueError('Argument inputs must be a 4D Tensor.')
-        if mask is None:
-            raise ValueError('To compute localization, mask must be provided.')
-        check_value_type('mask', mask, (Tensor, np.ndarray))
-        if len(mask.shape) != 4 or len(mask) != len(inputs):
-            raise ValueError("The input mask must be 4-dimensional (1, 1, h, w) with same length of inputs.")
diff --git a/mindspore/explainer/benchmark/_attribution/metric.py b/mindspore/explainer/benchmark/_attribution/metric.py
deleted file mode 100644
index 10eef90bb62..00000000000
--- a/mindspore/explainer/benchmark/_attribution/metric.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Base class for XAI metrics."""
-
-import copy
-import math
-from typing import Callable
-
-import numpy as np
-
-import mindspore as ms
-from mindspore import log as logger
-from mindspore.train._utils import check_value_type
-from ..._operators import Tensor
-from ..._utils import format_tensor_to_ndarray
-from ...explanation._attribution.attribution import Attribution
-
-_Explainer = Attribution
-
-
-def verify_argument(inputs, arg_name):
-    """Verify the validity of the parsed arguments."""
-    check_value_type(arg_name, inputs, Tensor)
-    if len(inputs.shape) != 4:
-        raise ValueError('Argument {} must be a 4D Tensor.'.format(arg_name))
-    if len(inputs) > 1:
-        raise ValueError('Support single data evaluation only, but got {}.'.format(len(inputs)))
-
-
-def verify_targets(targets, num_labels):
-    """Verify the validity of the parsed targets."""
-    check_value_type('targets', targets, (int, Tensor))
-
-    if isinstance(targets, Tensor):
-        if len(targets.shape) > 1 or (len(targets.shape) == 1 and len(targets) != 1):
-            raise ValueError('Argument targets must be a 1D or 0D Tensor. If it is a 1D Tensor, '
-                             'it should have the length = 1 as we only support single evaluation now.')
-        targets = int(targets.asnumpy()[0]) if len(targets.shape) == 1 else int(targets.asnumpy())
-    if targets > num_labels - 1 or targets < 0:
-        raise ValueError('Parsed targets exceed the label range.')
-
-
-class AttributionMetric:
-    """Base class of XAI metrics used in classification scenarios."""
-
-    def __init__(self):
-        self._explainer = None
-
-    evaluate: Callable
-    """
-    This method evaluates the explainer on the given attribution and returns the evaluation results.
-    Derived classes should implement this method according to the specific algorithm of the metric.
-    """
-
-    def _record_explainer(self, explainer: _Explainer):
-        """Record the explainer in current evaluation."""
-        if self._explainer is None:
-            self._explainer = explainer
-        elif self._explainer is not explainer:
-            logger.info('Provided explainer is not the same as previously evaluated one. Please reset the evaluated '
-                        'results. Previous explainer: %s, current explainer: %s', self._explainer, explainer)
-            self._explainer = explainer
-
-
-class LabelAgnosticMetric(AttributionMetric):
-    """Base class adding functions for label-agnostic metrics."""
-
-    def __init__(self):
-        super().__init__()
-        self._global_results = []
-
-    @property
-    def performance(self) -> float:
-        """
-        Return the average evaluation result.
-
-        Return:
-            float, averaged result. If no result is aggregated in the global_results, 0.0 will be returned.
-        """
-        result_sum, count = 0, 0
-        for res in self._global_results:
-            if math.isfinite(res):
-                result_sum += res
-                count += 1
-        return 0. if count == 0 else result_sum / count
-
-    def aggregate(self, result):
-        """Aggregate single evaluation result to global results."""
-        if isinstance(result, float):
-            self._global_results.append(result)
-        elif isinstance(result, (ms.Tensor, np.ndarray)):
-            result = format_tensor_to_ndarray(result)
-            self._global_results.extend([float(res) for res in result.reshape(-1)])
-        else:
-            raise TypeError('result should have type of float, ms.Tensor or np.ndarray, '
-                            'but received %s' % type(result))
-
-    def get_results(self):
-        """Return the global results."""
-        return self._global_results.copy()
-
-    def reset(self):
-        """Reset global results."""
-        self._global_results.clear()
-
-    def _check_evaluate_param(self, explainer, inputs):
-        """Check the evaluate parameters."""
-        check_value_type('explainer', explainer, Attribution)
-        self._record_explainer(explainer)
-        verify_argument(inputs, 'inputs')
-
-
-class LabelSensitiveMetric(AttributionMetric):
-    """Base class adding functions for label-sensitive metrics."""
-
-    def __init__(self, num_labels: int):
-        super().__init__()
-        LabelSensitiveMetric._verify_params(num_labels)
-        self._num_labels = num_labels
-        self._global_results = {i: [] for i in range(num_labels)}
-
-    @property
-    def num_labels(self):
-        """Number of labels used in evaluation."""
-        return self._num_labels
-
-    @staticmethod
-    def _verify_params(num_labels):
-        """Checks whether num_labels is valid."""
-        check_value_type("num_labels", num_labels, int)
-        if num_labels < 1:
-            raise ValueError("Argument num_labels must be parsed with an integer > 0.")
-
-    def aggregate(self, result, targets):
-        """Aggregates single result to global_results."""
-        if isinstance(result, float):
-            if isinstance(targets, int):
-                self._global_results[targets].append(result)
-            else:
-                target_np = format_tensor_to_ndarray(targets)
-                if len(target_np) > 1:
-                    raise ValueError("One result can not be aggregated to multiple targets.")
-                # a single scalar result belongs to the single parsed target
-                self._global_results[int(target_np.reshape(-1)[0])].append(result)
-        elif isinstance(result, (ms.Tensor, np.ndarray)):
-            result_np = format_tensor_to_ndarray(result).reshape(-1)
-            if isinstance(targets, int):
-                for res in result_np:
-                    self._global_results[targets].append(float(res))
-            else:
-                target_np = format_tensor_to_ndarray(targets).reshape(-1)
-                if len(target_np) != len(result_np):
-                    raise ValueError("Length of result does not match with length of targets.")
-                for tar, res in zip(target_np, result_np):
-                    self._global_results[int(tar)].append(float(res))
-        else:
-            raise TypeError('Result should have type of float, ms.Tensor or np.ndarray, '
-                            'but received %s' % type(result))
-
-    def reset(self):
-        """Resets global_result."""
-        self._global_results = {i: [] for i in range(self._num_labels)}
-
-    @property
-    def class_performances(self):
-        """
-        Get the class performances by global result.
-
-        Returns:
-            (:class:`list`): a list of performances where each value is the average score of a specific class.
-        """
-        results_on_labels = []
-        for label_id in range(self._num_labels):
-            sum_of_label, count_of_label = 0, 0
-            for res in self._global_results[label_id]:
-                if math.isfinite(res):
-                    sum_of_label += res
-                    count_of_label += 1
-            results_on_labels.append(0. if count_of_label == 0 else sum_of_label / count_of_label)
-        return results_on_labels
-
-    @property
-    def performance(self):
-        """
-        Get the performance by global result.
-
-        Returns:
-            (:class:`float`): mean performance.
-        """
-        result_sum, count = 0, 0
-        for label_id in range(self._num_labels):
-            for res in self._global_results[label_id]:
-                if math.isfinite(res):
-                    result_sum += res
-                    count += 1
-        return 0. if count == 0 else result_sum / count
-
-    def get_results(self):
-        """Return a copy of the global results of the metric."""
-        return copy.deepcopy(self._global_results)
-
-    def _check_evaluate_param(self, explainer, inputs, targets, saliency):
-        """Check the evaluate parameters."""
-        check_value_type('explainer', explainer, Attribution)
-        self._record_explainer(explainer)
-        verify_argument(inputs, 'inputs')
-        output = explainer.network(inputs)
-        check_value_type("output of explainer model", output, Tensor)
-        # reuse the forward pass above instead of running the network a second time
-        output_dim = output.shape[1]
-        if output_dim != self._num_labels:
-            raise ValueError("The output dimension of the black-box model in the explainer does not match the "
-                             "num_labels set in the __init__, please check the explainer and num_labels again.")
-        verify_targets(targets, self._num_labels)
-        check_value_type('saliency', saliency, (Tensor, type(None)))
diff --git a/mindspore/explainer/benchmark/_attribution/robustness.py b/mindspore/explainer/benchmark/_attribution/robustness.py
deleted file mode 100644
index 9be991bca43..00000000000
--- a/mindspore/explainer/benchmark/_attribution/robustness.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Robustness."""
-
-import numpy as np
-
-import mindspore as ms
-import mindspore.nn as nn
-from mindspore.train._utils import check_value_type
-from mindspore import log
-from .metric import LabelSensitiveMetric
-from ...explanation._attribution._perturbation.replacement import RandomPerturb
-from ..._utils import deprecated_error
-
-
-@deprecated_error
-class Robustness(LabelSensitiveMetric):
-    """
-    Robustness perturbs the inputs by adding random noise and chooses the maximum sensitivity among the
-    perturbations as the evaluation score.
-
-    Args:
-        num_labels (int): Number of classes in the dataset.
-        activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For
-            single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification
-            tasks, `nn.Sigmoid` is usually applied. Users can also pass their own customized `activation_fn` as long
-            as, when combined with the network, the final output is the probability of the input.
-
-    Raises:
-        TypeError: Be raised for any argument type problem.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-    """
-
-    def __init__(self, num_labels, activation_fn):
-        super().__init__(num_labels)
-        check_value_type("activation_fn", activation_fn, nn.Cell)
-        self._perturb = RandomPerturb()
-        self._num_perturbations = 10  # number of perturbations used in evaluation
-        self._threshold = 0.1  # threshold to generate perturbation
-        self._activation_fn = activation_fn
-
-    def evaluate(self, explainer, inputs, targets, saliency=None):
-        """
-        Evaluate robustness on single sample.
-
-        Note:
-            Currently only single sample (:math:`N=1`) at each call is supported.
-
-        Args:
-            explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
-            inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
-            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
-                If `targets` is a 1D tensor, its length should be the same as `inputs`.
-            saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`.
-                If it is None, the parsed `explainer` will generate the saliency map with `inputs` and `targets` and
-                continue the evaluation. Default: None.
-
-        Returns:
-            numpy.ndarray, 1D array of shape :math:`(N,)`, result of robustness evaluated on `explainer`.
-
-        Raises:
-            ValueError: If batch_size is larger than 1.
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore as ms
-            >>> from mindspore import nn
-            >>> from mindspore.explainer.explanation import Gradient
-            >>> from mindspore.explainer.benchmark import Robustness
-            >>> from mindspore import context
-            >>>
-            >>> context.set_context(mode=context.PYNATIVE_MODE)
-            >>> # Initialize a Robustness benchmarker passing num_labels of the dataset.
-            >>> num_labels = 10
-            >>> activation_fn = nn.Softmax()
-            >>> robustness = Robustness(num_labels, activation_fn)
-            >>>
-            >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
-            >>> net = LeNet5(10, num_channel=3)
-            >>> # prepare your explainer to be evaluated, e.g., Gradient.
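-            >>> # (LeNet5 is assumed to be defined or imported beforehand; since `nn` is
-            >>> # imported above, a stand-in such as
-            >>> # nn.SequentialCell([nn.Flatten(), nn.Dense(3*32*32, 10)]) would also do.)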
-            >>> gradient = Gradient(net)
-            >>> input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
-            >>> target_label = ms.Tensor([0], ms.int32)
-            >>> # robustness is a Robustness instance
-            >>> res = robustness.evaluate(gradient, input_x, target_label)
-            >>> print(res.shape)
-            (1,)
-        """
-
-        self._check_evaluate_param(explainer, inputs, targets, saliency)
-        if inputs.shape[0] > 1:
-            raise ValueError('Robustness only supports one sample at each call, '
-                             'but received {}'.format(inputs.shape[0]))
-
-        if isinstance(targets, int):
-            targets = ms.Tensor([targets], ms.int32)
-        if saliency is None:
-            saliency = explainer(inputs, targets)
-        saliency = saliency.asnumpy()
-
-        norm = np.sqrt(np.sum(np.square(saliency), axis=tuple(range(1, len(saliency.shape)))))
-        if (norm == 0).any():
-            log.warning('Get saliency norm equals 0, robustness return NaN for zero-norm saliency currently.')
-            norm[norm == 0] = np.nan
-
-        full_network = nn.SequentialCell([explainer.network, self._activation_fn])
-        original_outputs = full_network(inputs).asnumpy()
-        sensitivities = []
-        inputs = inputs.asnumpy()
-        for _ in range(self._num_perturbations):
-            perturbations = []
-            for j, sample in enumerate(inputs):
-                perturbation_on_single_sample = self._perturb_with_threshold(full_network,
-                                                                             np.expand_dims(sample, axis=0),
-                                                                             original_outputs[j])
-                perturbations.append(perturbation_on_single_sample)
-            perturbations = np.vstack(perturbations)
-            perturbations = explainer(ms.Tensor(perturbations, ms.float32), targets).asnumpy()
-            sensitivity = np.sqrt(np.sum((perturbations - saliency) ** 2,
-                                         axis=tuple(range(1, len(saliency.shape)))))
-            sensitivities.append(sensitivity)
-        sensitivities = np.stack(sensitivities, axis=-1)
-        sensitivity = np.max(sensitivities, axis=1) / norm
-        return 1 / np.exp(sensitivity)
-
-    def _perturb_with_threshold(self, network: nn.Cell, sample: np.ndarray, original_output: np.ndarray) -> np.ndarray:
-        """
-        Generate a perturbation until the L2-distance between original_output and perturbation_output is lower than
-        the given self._threshold, or until the attempt reaches max_attempt_time.
-        """
-        # the maximum number of attempts to get a perturbation with perturb_error lower than self._threshold
-        max_attempt_time = 3
-        perturbation = None
-        for _ in range(max_attempt_time):
-            perturbation = self._perturb(sample)
-            # evaluate the perturbed sample, not the original one; `network` already
-            # ends with the activation layer, so it is not applied a second time here
-            perturbation_output = network(ms.Tensor(perturbation, ms.float32)).asnumpy()
-            perturb_error = np.linalg.norm(original_output - perturbation_output)
-            if perturb_error <= self._threshold:
-                return perturbation
-        return perturbation
diff --git a/mindspore/explainer/explanation/__init__.py b/mindspore/explainer/explanation/__init__.py
deleted file mode 100644
index 8251e1ce152..00000000000
--- a/mindspore/explainer/explanation/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Predefined Attribution explainers."""
-
-from ._attribution._backprop.gradient import Gradient
-from ._attribution._backprop.gradcam import GradCAM
-from ._attribution._backprop.modified_relu import Deconvolution, GuidedBackprop
-from ._attribution._perturbation.occlusion import Occlusion
-from ._attribution._perturbation.rise import RISE
-
-__all__ = [
-    'Gradient',
-    'Deconvolution',
-    'GuidedBackprop',
-    'GradCAM',
-    'Occlusion',
-    'RISE',
-]
diff --git a/mindspore/explainer/explanation/_attribution/__init__.py b/mindspore/explainer/explanation/_attribution/__init__.py
deleted file mode 100644
index 97f492ffdb2..00000000000
--- a/mindspore/explainer/explanation/_attribution/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Predefined Attribution explainers."""
-
-from .attribution import Attribution
-
-__all__ = [
-    'Attribution'
-]
diff --git a/mindspore/explainer/explanation/_attribution/_backprop/__init__.py b/mindspore/explainer/explanation/_attribution/_backprop/__init__.py
deleted file mode 100644
index fd673e4a435..00000000000
--- a/mindspore/explainer/explanation/_attribution/_backprop/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Backprop-based _attribution explainers."""
diff --git a/mindspore/explainer/explanation/_attribution/_backprop/backprop_utils.py b/mindspore/explainer/explanation/_attribution/_backprop/backprop_utils.py
deleted file mode 100644
index 2c1017bfd38..00000000000
--- a/mindspore/explainer/explanation/_attribution/_backprop/backprop_utils.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Providing utility functions."""
-
-from mindspore.nn import Cell
-from mindspore.ops.composite import GradOperation
-from mindspore.explainer._utils import unify_inputs, unify_targets, generate_one_hot
-
-
-def get_bp_weights(model, inputs, targets=None, weights=None):
-    r"""
-    Compute the backpropagation weights, i.e. the output-side sensitivity that
-    will be back-propagated to the input.
-
-    Args:
-        model (Cell): Differentiable black-box model.
-        inputs (Tensor): Input to calculate gradient and explanation.
-        targets (int, optional): Target label id specifying which category to compute gradient. Default: None.
-        weights (Tensor, optional): Custom weights for computing gradients. The shape of weights should match the
-            model outputs. If None is provided, a one-hot weight tensor with ones at the `targets` positions will
-            be used instead. Default: None.
-
-    Returns:
-        Tensor, signal to be back-propagated to the input.
-    """
-    inputs = unify_inputs(inputs)
-    if targets is None and weights is None:
-        raise ValueError('Must provide one of targets or weights')
-    if weights is None:
-        targets = unify_targets(targets)
-        output = model(*inputs)
-        num_categories = output.shape[-1]
-        weights = generate_one_hot(targets, num_categories)
-    return weights
-
-
-class GradNet(Cell):
-    """
-    Network for gradient calculation.
-
-    Args:
-        network (Cell): The network to generate backpropagated gradients.
-    """
-
-    def __init__(self, network):
-        super(GradNet, self).__init__()
-        self.network = network
-        self.grad = GradOperation(get_all=True, sens_param=True)(network)
-
-    def construct(self, *input_data):
-        """
-        Get backpropagated gradients.
-
-        Returns:
-            Tensor, output gradients.
-        """
-        gout = self.grad(*input_data)[0]
-        return gout
diff --git a/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py b/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py
deleted file mode 100644
index b3e68f4835a..00000000000
--- a/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-"""GradCAM."""
-
-from mindspore.ops import operations as op
-from mindspore.explainer._utils import deprecated_error, ForwardProbe, retrieve_layer, unify_inputs, unify_targets
-
-from .backprop_utils import get_bp_weights, GradNet
-from .intermediate_layer import IntermediateLayerAttribution
-
-
-def _gradcam_aggregation(attributions):
-    """
-    Aggregate the gradient and activation to get the final _attribution.
-
-    Args:
-        attributions (Tensor): the _attribution with channel dimension.
-
-    Returns:
-        Tensor: the _attribution with the channel dimension aggregated.
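-
-    Note:
-        This performs the final GradCAM step: a sum over the channel dimension
-        followed by ReLU, matching ReLU(sum_k alpha_k^c A^k) in the class
-        formula, once the channel maps have been weighted by the pooled
-        gradients.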
- """ - sum_ = op.ReduceSum(keep_dims=True) - relu_ = op.ReLU() - attributions = relu_(sum_(attributions, 1)) - return attributions - - -@deprecated_error -class GradCAM(IntermediateLayerAttribution): - r""" - Provides GradCAM explanation method. - - `GradCAM` generates saliency map at intermediate layer. The attribution is obtained as: - - .. math:: - - \alpha_k^c = \frac{1}{Z} \sum_i \sum_j \frac{\partial{y^c}}{\partial{A_{i,j}^k}} - - attribution = ReLU(\sum_k \alpha_k^c A^k) - - For more details, please refer to the original paper: `GradCAM `_. - - Note: - The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`. - If you want to train the `network` afterwards, please reset it back to training mode through the opposite - operations. - - Args: - network (Cell): The black-box model to be explained. - layer (str, optional): The layer name to generate the explanation, usually chosen as the last convolutional - layer for better practice. If it is '', the explanation will be generated at the input layer. - Default: ''. - - Inputs: - - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - - Outputs: - Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps. - - Raises: - TypeError: Be raised for any argument or input type problem. - ValueError: Be raised for any input value problem. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import GradCAM - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> # specify a layer name to generate explanation, usually the layer can be set as the last conv layer. - >>> layer_name = 'conv2' - >>> # init GradCAM with a trained network and specify the layer to obtain attribution - >>> gradcam = GradCAM(net, layer=layer_name) - >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> label = 5 - >>> saliency = gradcam(inputs, label) - >>> print(saliency.shape) - (1, 1, 32, 32) - """ - - def __init__(self, network, layer=""): - super(GradCAM, self).__init__(network, layer) - - self._saliency_cell = retrieve_layer(self._backward_model, target_layer=layer) - self._avgpool = op.ReduceMean(keep_dims=True) - self._intermediate_grad = None - self._aggregation_fn = _gradcam_aggregation - self._resize_mode = 'bilinear' - - def _hook_cell(self): - if self._saliency_cell: - self._saliency_cell.register_backward_hook(self._cell_hook_fn) - self._saliency_cell.enable_hook = True - self._intermediate_grad = None - - def _cell_hook_fn(self, _, grad_input, grad_output): - """ - Hook function to deal with the backward gradient. - - The arguments are set as required by `Cell.register_backward_hook`. 
- """ - self._intermediate_grad = grad_input - - def __call__(self, inputs, targets): - """Call function for `GradCAM`.""" - self._verify_data(inputs, targets) - self._hook_cell() - - with ForwardProbe(self._saliency_cell) as probe: - - inputs = unify_inputs(inputs) - targets = unify_targets(targets) - - weights = get_bp_weights(self._backward_model, *inputs, targets) - grad_net = GradNet(self._backward_model) - gradients = grad_net(*inputs, weights) - # get intermediate activation - activation = (probe.value,) - - if self._layer == "": - activation = inputs - self._intermediate_grad = unify_inputs(gradients) - if self._intermediate_grad is not None: - # average pooling on gradients - intermediate_grad = unify_inputs( - self._avgpool(self._intermediate_grad[0], (2, 3))) - else: - raise ValueError("Gradient for intermediate layer is not " - "obtained") - mul = op.Mul() - attribution = self._aggregation_fn( - mul(*intermediate_grad, *activation)) - if self._resize: - attribution = self._resize_fn(attribution, *inputs, - mode=self._resize_mode) - self._intermediate_grad = None - - return attribution diff --git a/mindspore/explainer/explanation/_attribution/_backprop/gradient.py b/mindspore/explainer/explanation/_attribution/_backprop/gradient.py deleted file mode 100644 index 8409d9919ac..00000000000 --- a/mindspore/explainer/explanation/_attribution/_backprop/gradient.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Gradient explainer.""" -from copy import deepcopy - -from mindspore.train._utils import check_value_type -from mindspore.explainer._operators import Tensor -from mindspore.explainer._utils import abs_max, deprecated_error, unify_inputs, unify_targets - -from .. import Attribution -from .backprop_utils import get_bp_weights, GradNet - - -@deprecated_error -class Gradient(Attribution): - r""" - Provides Gradient explanation method. - - Gradient is the simplest attribution method which uses the naive gradients of outputs w.r.t inputs as the - explanation. - - .. math:: - - attribution = \frac{\partial{y}}{\partial{x}} - - Note: - The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`. - If you want to train the `network` afterwards, please reset it back to training mode through the opposite - operations. - - Args: - network (Cell): The black-box model to be explained. - - Inputs: - - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - - Outputs: - Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps. - - Raises: - TypeError: Be raised for any argument type problem. - ValueError: Be raised for any input value problem. 
- - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import Gradient - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> gradient = Gradient(net) - >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> label = 5 - >>> saliency = gradient(inputs, label) - >>> print(saliency.shape) - (1, 1, 32, 32) - """ - - def __init__(self, network): - super(Gradient, self).__init__(network) - self._backward_model = deepcopy(network) - self._backward_model.set_train(False) - self._backward_model.set_grad(False) - self._grad_net = GradNet(self._backward_model) - self._aggregation_fn = abs_max - - def __call__(self, inputs, targets): - """Call function for `Gradient`.""" - self._verify_data(inputs, targets) - inputs = unify_inputs(inputs) - targets = unify_targets(targets) - - weights = get_bp_weights(self._backward_model, *inputs, targets) - gradient = self._grad_net(*inputs, weights) - saliency = self._aggregation_fn(gradient) - return saliency - - @staticmethod - def _verify_data(inputs, targets): - """ - Verify the validity of the parsed inputs. - - Args: - inputs (Tensor): The inputs to be explained. - targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - """ - check_value_type('inputs', inputs, Tensor) - if len(inputs.shape) != 4: - raise ValueError(f'Argument inputs must be 4D Tensor. But got {len(inputs.shape)}D Tensor.') - check_value_type('targets', targets, (Tensor, int)) - if isinstance(targets, Tensor): - if len(targets.shape) > 1 or (len(targets.shape) == 1 and len(targets) != len(inputs)): - raise ValueError('Argument targets must be a 1D or 0D Tensor. If it is a 1D Tensor, ' - 'it should have the same length as inputs.') diff --git a/mindspore/explainer/explanation/_attribution/_backprop/intermediate_layer.py b/mindspore/explainer/explanation/_attribution/_backprop/intermediate_layer.py deleted file mode 100644 index 73785e05011..00000000000 --- a/mindspore/explainer/explanation/_attribution/_backprop/intermediate_layer.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Base class IntermediateLayerAttribution""" - -from mindspore.explainer._utils import resize as resize_fn - -from .gradient import Gradient - - -class IntermediateLayerAttribution(Gradient): - """ - Base class for generating _attribution map at intermediate layer. - - Args: - network (nn.Cell): DNN model to be explained. - layer (str, optional): string that specifies the layer to generate - intermediate _attribution. 
When using default value, the input layer - will be specified. Default: ''. - - Raises: - TypeError: Be raised for any argument type problem. - """ - - def __init__(self, network, layer=''): - super(IntermediateLayerAttribution, self).__init__(network) - - # Whether resize the _attribution layer to the input size. - self._resize = True - # string that specifies the resize mode. Default: 'nearest_neighbor'. - self._resize_mode = 'nearest_neighbor' - - self._layer = layer - - @staticmethod - def _resize_fn(attributions, inputs, mode): - """Resize the intermediate layer _attribution to the same size as inputs.""" - height, width = inputs.shape[2], inputs.shape[3] - return resize_fn(attributions, (height, width), mode) diff --git a/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py b/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py deleted file mode 100644 index 28b1e5d2d59..00000000000 --- a/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Explainer with modified ReLU.""" - -import mindspore.nn as nn -import mindspore.ops.operations as op -from mindspore.explainer._utils import ( - deprecated_error, - unify_inputs, - unify_targets, -) - -from .backprop_utils import GradNet, get_bp_weights -from .gradient import Gradient - - -class ModifiedReLU(Gradient): - """Basic class for modified ReLU explanation.""" - - def __init__(self, network, use_relu_backprop=False): - super(ModifiedReLU, self).__init__(network) - self.use_relu_backprop = use_relu_backprop - self._hook_relu_backward() - self._grad_net = GradNet(self._backward_model) - - def __call__(self, inputs, targets): - """ - Call function for `ModifiedReLU`, inherited by "Deconvolution" and "GuidedBackprop". - - Args: - inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - - Returns: - Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps. - - Raises: - TypeError: Be raised for any argument type problem. - ValueError: Be raised for any argument value problem. 
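-
-        Note (added for clarity): the two subclasses below differ only in
-        which gradient the injected ReLU rectifies. ``Deconvolution``
-        (``use_relu_backprop=True``) rectifies ``grad_inputs``, while
-        ``GuidedBackprop`` rectifies ``grad_outputs``; see ``_backward_hook``.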
- - Supported Platforms: - ``Ascend`` ``GPU`` - """ - - self._verify_data(inputs, targets) - inputs = unify_inputs(inputs) - targets = unify_targets(targets) - - weights = get_bp_weights(self._backward_model, inputs, targets) - gradients = self._grad_net(*inputs, weights) - saliency = self._aggregation_fn(gradients) - - return saliency - - def _hook_relu_backward(self): - """Set backward hook for ReLU layers.""" - for _, cell in self._backward_model.cells_and_names(): - if isinstance(cell, nn.ReLU): - cell.register_backward_hook(self._backward_hook) - - def _backward_hook(self, _, grad_inputs, grad_outputs): - """Hook function for ReLU layers.""" - inputs = grad_inputs if self.use_relu_backprop else grad_outputs - relu = op.ReLU() - if isinstance(inputs, tuple): - return relu(*inputs) - return relu(inputs) - - -@deprecated_error -class Deconvolution(ModifiedReLU): - """ - Deconvolution explanation. - - Deconvolution method is a modified version of Gradient method. For the original ReLU operation in the network to be - explained, Deconvolution modifies the propagation rule from directly backpropagating gradients to backprpagating - positive gradients. - - Note: - The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`. - If you want to train the `network` afterwards, please reset it back to training mode through the opposite - operations. To use `Deconvolution`, the `ReLU` operations in the network must be implemented with - `mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be - correct. - - Args: - network (Cell): The black-box model to be explained. - - Inputs: - - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - - Outputs: - Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps. - - Raises: - TypeError: Be raised for any argument or input type problem. - ValueError: Be raised for any input value problem. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import Deconvolution - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> deconvolution = Deconvolution(net) - >>> # parse data and the target label to be explained and get the saliency map - >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> label = 5 - >>> saliency = deconvolution(inputs, label) - >>> print(saliency.shape) - (1, 1, 32, 32) - """ - - def __init__(self, network): - super(Deconvolution, self).__init__(network, use_relu_backprop=True) - - -@deprecated_error -class GuidedBackprop(ModifiedReLU): - """ - Guided-Backpropagation explanation. - - Guided-Backpropagation method is an extension of Gradient method. On top of the original ReLU operation in the - network to be explained, Guided-Backpropagation introduces another ReLU operation to filter out the negative - gradients during backpropagation. - - Note: - The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`. 
- If you want to train the `network` afterwards, please reset it back to training mode through the opposite - operations. To use `GuidedBackprop`, the `ReLU` operations in the network must be implemented with - `mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be - correct. - - Args: - network (Cell): The black-box model to be explained. - - Inputs: - - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - - Outputs: - Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps. - - Raises: - TypeError: Be raised for any argument or input type problem. - ValueError: Be raised for any input value problem. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import GuidedBackprop - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> gbp = GuidedBackprop(net) - >>> # feed data and the target label to be explained and get the saliency map - >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> label = 5 - >>> saliency = gbp(inputs, label) - >>> print(saliency.shape) - (1, 1, 32, 32) - """ - - def __init__(self, network): - super(GuidedBackprop, self).__init__(network, use_relu_backprop=False) diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/__init__.py b/mindspore/explainer/explanation/_attribution/_perturbation/__init__.py deleted file mode 100644 index 0fa398383f3..00000000000 --- a/mindspore/explainer/explanation/_attribution/_perturbation/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -""" Perturbation-based _attribution explainer. """ diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/ablation.py b/mindspore/explainer/explanation/_attribution/_perturbation/ablation.py deleted file mode 100644 index 6b81be0da65..00000000000 --- a/mindspore/explainer/explanation/_attribution/_perturbation/ablation.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Modules to ablate images.""" - -__all__ = [ - 'Ablation', - 'AblationWithSaliency', -] - -import math -from functools import reduce -from typing import Optional, Union - -import numpy as np - -from .replacement import Constant -from ...._utils import rank_pixels - - -class Ablation: - """ - Base class to ablate image based on given replacement. - - Args: - perturb_mode (str): Perturbation mode. - - Inputs: - inputs (np.ndarray): Input array to perturb. The first dim of inputs is assumed to be the batch size, i.e., - number of samples. - reference (np.ndarray or float): Array of values to replace the elements in the original inputs. The shape - of reference must match the inputs. If scalar is provided, the perturbed elements will be assigned the - given value.. - masks (np.ndarray): Several boolean array to mark the perturbed positions. True marks the pixels to be - perturbed, otherwise the pixels will be kept. The shape of masks is assumed to be - [batch_size, num_perturbations, inputs_shape[1:]]. - - Returns: - np.ndarray, perturbations. - - Raises: - TypeError: Be raised for any input type problem. - ValueError: Be raised for any input value problem. - """ - - def __init__(self, perturb_mode: str): - self._perturb_mode = perturb_mode - - def __call__(self, - inputs: np.array, - reference: Union[np.array, float], - masks: np.array - ) -> np.array: - - """Generate perturbations of given array.""" - if isinstance(reference, float): - reference = Constant(base_value=reference)(inputs) - - if not np.array_equal(inputs.shape, reference.shape): - raise ValueError('reference must have the same shape as inputs.') - - num_perturbations = masks.shape[1] - - if self._perturb_mode == 'Insertion': - inputs, reference = reference, inputs - - perturbations = np.repeat(inputs[:, None, :], num_perturbations, 1) - reference = np.repeat(reference[:, None, :], num_perturbations, 1) - Ablation._assign(perturbations, reference, masks) - - return perturbations - - @staticmethod - def _assign(original_array: np.ndarray, replacement: np.ndarray, masks: np.ndarray): - """Assign values to perturb pixels on perturbations.""" - if masks.dtype != bool: - raise TypeError('The param "masks" should be an array of bool, but receive {}'.format(masks.dtype)) - - if not np.array_equal(original_array.shape, masks.shape): - raise ValueError('masks must have the shape {} same as [batch_size, num_perturbations, inputs.shape[1:],' - 'but receive {}.'.format(original_array.shape, masks.shape)) - - original_array[masks] = replacement[masks] - - -class AblationWithSaliency(Ablation): - """ - Perturbation generator to generate perturbations w.r.t a given saliency map. - - Args: - perturb_percent (float): percentage of pixels to perturb - perturb_mode (str): specify perturbing mode, through deleting or - inserting pixels. Current support: ['Deletion', 'Insertion']. - is_accumulate (bool): whether to accumulate the former perturbations to - the later perturbations. - perturb_pixel_per_step (int, optional): number of pixel to perturb - for each perturbation. If perturb_pixel_per_step is None, actual - perturb_pixel_per_step will be calculate by: - num_image_pixel * perturb_percent / num_perturb_steps. - Default: None - num_perturbations (int, optional): number of perturbations. 
If - num_perturbations if None, it will be calculated by: - num_image_pixel * perturb_percent / perturb_pixel_per_step. - Default: None - - """ - - def __init__(self, - perturb_mode: str, - perturb_percent: float = 1.0, - is_accumulate: bool = False, - perturb_pixel_per_step: Optional[int] = None, - num_perturbations: Optional[int] = None): - super().__init__(perturb_mode) - self._perturb_percent = perturb_percent - self._perturb_mode = perturb_mode - self._pixel_per_step = perturb_pixel_per_step - self._num_perturbations = num_perturbations - self._is_accumulate = is_accumulate - - def generate_mask(self, - saliency: np.ndarray, - num_channels: Optional[int] = None - ) -> np.ndarray: - """ - Generate mask for perturbations based on given saliency ranks. - - Args: - saliency (numpy.array): Perturbing masks will be generated based on the given saliency map. The shape of - saliency is expected to be: [batch_size, optional(num_channels), *spatial_size]. If multi-channel - saliency is provided, an averaged saliency will be taken to calculate pixel order in spatial dimension. - num_channels (optional[int]): Number of channels of the input data. In order to match the shape of inputs, - num_channels should be provided when input data have channels dimension, even if num_channel is 1. - If None is provided, the inputs is assumed to be no-channel data, and the generated mask will have - no channel dimension. Default: None. - - Return: - numpy.array, boolean masks for perturbation generation. - """ - - batch_size = saliency.shape[0] - has_channel = num_channels is not None - num_channels = 1 if num_channels is None else num_channels - - if has_channel: - saliency = saliency.mean(axis=1) - saliency_rank = rank_pixels(saliency, descending=True) - num_pixels = reduce(lambda x, y: x * y, saliency.shape[1:]) - - pixel_per_step, num_perturbations = self._check_and_format_perturb_param(num_pixels) - - masks = np.zeros((batch_size, num_perturbations, num_channels, saliency_rank.shape[1], saliency_rank.shape[2]), - dtype=np.bool) - - # If the perturbation is added accumulately, the factor should be 0 to preserve the low bound of indexing. - factor = 0 if self._is_accumulate else 1 - - for i in range(batch_size): - low_bound = 0 - up_bound = low_bound + pixel_per_step - for j in range(num_perturbations): - masks[i, j, :, ((saliency_rank[i] >= low_bound) & (saliency_rank[i] < up_bound))] = True - low_bound = up_bound * factor - up_bound += pixel_per_step - - masks = masks if has_channel else np.squeeze(masks, axis=2) - return masks - - def _check_and_format_perturb_param(self, num_pixels): - """ - Check whether the self._pixel_per_step and self._num_perturbation is valid. If the parameters are unreasonable, - this function will try to reassign the parameters and raise ValueError when reassignment is failed. - """ - if self._pixel_per_step: - pixel_per_step = self._pixel_per_step - num_perturbations = math.floor(num_pixels * self._perturb_percent / self._pixel_per_step) - if not num_perturbations: - raise ValueError("Number of perturbations is not valid. 
Please enlarge the value of perturb_percent or " - "reduce the value of pixel_per_step when instantiating AblationWithSaliency.") - elif self._num_perturbations: - pixel_per_step = math.floor(num_pixels * self._perturb_percent / self._num_perturbations) - num_perturbations = self._num_perturbations - else: - # If neither pixel_per_step or num_perturbations is provided, num_perturbations is determined by the square - # root of product from the spatial size of saliency map. - num_perturbations = math.floor(np.sqrt(num_pixels)) - pixel_per_step = math.floor(num_pixels * self._perturb_percent / num_perturbations) - - return pixel_per_step, num_perturbations diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py b/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py deleted file mode 100644 index 344a22a2f1a..00000000000 --- a/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Occlusion explainer.""" - -from typing import Tuple - -import numpy as np - -import mindspore as ms -import mindspore.nn as nn -from .ablation import Ablation -from .perturbation import PerturbationAttribution -from .replacement import Constant -from ...._utils import abs_max, deprecated_error - - -def _generate_patches(array, window_size: Tuple, strides: Tuple): - """Generate patches from image w.r.t given window_size and strides.""" - window_strides = array.strides - slices = tuple(slice(None, None, stride) for stride in strides) - indexing_strides = array[slices].strides - win_indices_shape = (np.array(array.shape) - np.array(window_size)) // np.array(strides) + 1 - - patches_shape = tuple(win_indices_shape) + window_size - strides_in_memory = indexing_strides + window_strides - patches = np.lib.stride_tricks.as_strided(array, shape=patches_shape, strides=strides_in_memory, writeable=False) - patches = patches.reshape((-1,) + window_size) - return patches - - -@deprecated_error -class Occlusion(PerturbationAttribution): - """ - Occlusion uses a sliding window to replace the pixels with a reference value (e.g. constant value), and computes - the output difference w.r.t the original output. The output difference caused by perturbed pixels are assigned as - feature importance to those pixels. For pixels involved in multiple sliding windows, the feature importance is the - averaged differences from multiple sliding windows. - - For more details, please refer to the original paper via: ``_. - - Args: - network (Cell): The black-box model to be explained. - activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For - single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification - tasks,`nn.Sigmoid` is usually be applied. 
Users can also pass their own customized `activation_fn` as long - as when combining this function with network, the final output is the probability of the input. - perturbation_per_eval (int, optional): Number of perturbations for each inference during inferring the - perturbed samples. Within the memory capacity, usually the larger this number is, the faster the - explanation is obtained. Default: 32. - - Inputs: - - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer. - If it is a 1D tensor, its length should be the same as `inputs`. - - Outputs: - Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps. - - Raises: - TypeError: Be raised for any argument or input type problem. - ValueError: Be raised for any input value problem. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Example: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import Occlusion - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> # initialize Occlusion explainer with the pretrained model and activation function - >>> activation_fn = ms.nn.Softmax() # softmax layer is applied to transform logits to probabilities - >>> occlusion = Occlusion(net, activation_fn=activation_fn) - >>> input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32) - >>> label = ms.Tensor([1], ms.int32) - >>> saliency = occlusion(input_x, label) - >>> print(saliency.shape) - (1, 1, 32, 32) - """ - - def __init__(self, network, activation_fn, perturbation_per_eval=32): - super().__init__(network, activation_fn, perturbation_per_eval) - - self._ablation = Ablation(perturb_mode='Deletion') - self._aggregation_fn = abs_max - self._get_replacement = Constant(base_value=0.0) - self._num_sample_per_dim = 32 # specify the number of perturbations each dimension. 
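-
-    # Note (added for clarity, not part of the original implementation): with
-    # a 32x32 input, _get_window_size_and_strides below falls back to a
-    # (C, 3, 3) window moving with stride (C, 1, 1), so each interior pixel is
-    # covered by up to 9 overlapping windows and its final score is the
-    # average output drop over the windows covering it.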
- - def __call__(self, inputs, targets): - """Call function for 'Occlusion'.""" - self._verify_data(inputs, targets) - - inputs = inputs.asnumpy() - targets = targets.asnumpy() if isinstance(targets, ms.Tensor) else np.array([targets], np.int) - - batch_size = inputs.shape[0] - window_size, strides = self._get_window_size_and_strides(inputs) - - full_network = nn.SequentialCell([self._network, self._activation_fn]) - - original_outputs = full_network(ms.Tensor(inputs, ms.float32)).asnumpy()[np.arange(batch_size), targets] - - masks = Occlusion._generate_masks(inputs, window_size, strides) - - return self._perturbate(batch_size, full_network, (original_outputs, masks, inputs, targets)) - - def _perturbate(self, batch_size, full_network, data): - """Perform perturbations.""" - original_outputs, masks, inputs, targets = data - total_attribution = np.zeros_like(inputs) - weights = np.ones_like(inputs) - num_perturbations = masks.shape[1] - reference = self._get_replacement(inputs) - - count = 0 - while count < num_perturbations: - ith_masks = masks[:, count:min(count+self._perturbation_per_eval, num_perturbations)] - actual_num_eval = ith_masks.shape[1] - num_samples = batch_size * actual_num_eval - occluded_inputs = self._ablation(inputs, reference, ith_masks) - occluded_inputs = occluded_inputs.reshape((-1, *inputs.shape[1:])) - targets_repeat = np.repeat(targets, repeats=actual_num_eval, axis=0) - occluded_outputs = full_network( - ms.Tensor(occluded_inputs, ms.float32)).asnumpy()[np.arange(num_samples), targets_repeat] - original_outputs_repeat = np.repeat(original_outputs, repeats=actual_num_eval, axis=0) - outputs_diff = original_outputs_repeat - occluded_outputs - total_attribution += ( - outputs_diff.reshape(ith_masks.shape[:2] + (1,) * (len(masks.shape) - 2)) * ith_masks).sum(axis=1) - weights += ith_masks.sum(axis=1) - count += actual_num_eval - attribution = self._aggregation_fn(ms.Tensor(total_attribution / weights, ms.float32)) - return attribution - - def _get_window_size_and_strides(self, inputs): - """ - Return window_size and strides. - - # If spatial size of input data is smaller than self._num_sample_per_dim, window_size and strides will set to - # `(C, 3, 3)` and `(C, 1, 1)` separately. Otherwise, the window_size and strides will generated adaptively to - match self._num_sample_per_dim. 
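-
-        Example (added for clarity): inputs of shape (1, 3, 224, 224) with
-        self._num_sample_per_dim = 32 give window_size (3, 7, 7) and strides
-        (3, 7, 7); a (1, 3, 32, 32) input falls back to (3, 3, 3) and
-        (3, 1, 1).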
- """ - window_size = tuple( - [inputs.shape[1]] - + [x // self._num_sample_per_dim if x > self._num_sample_per_dim else 3 for x in inputs.shape[2:]]) - strides = tuple( - [inputs.shape[1]] - + [x // self._num_sample_per_dim if x > self._num_sample_per_dim else 1 for x in inputs.shape[2:]]) - return window_size, strides - - @staticmethod - def _generate_masks(inputs, window_size, strides): - """Generate masks to perturb contiguous regions.""" - total_dim = np.prod(inputs.shape[1:]).item() - template = np.arange(total_dim).reshape(inputs.shape[1:]) - indices = _generate_patches(template, window_size, strides) - num_perturbations = indices.shape[0] - indices = indices.reshape(num_perturbations, -1) - - mask = np.zeros((num_perturbations, total_dim), dtype=np.bool) - for i in range(num_perturbations): - mask[i, indices[i]] = True - mask = mask.reshape((num_perturbations,) + inputs.shape[1:]) - - masks = np.tile(mask, reps=(inputs.shape[0],) + (1,) * len(mask.shape)) - return masks diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/perturbation.py b/mindspore/explainer/explanation/_attribution/_perturbation/perturbation.py deleted file mode 100644 index c28df5e3123..00000000000 --- a/mindspore/explainer/explanation/_attribution/_perturbation/perturbation.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Base class `PerturbationAttribtuion`""" - -from mindspore.train._utils import check_value_type -from mindspore.nn import Cell - -from ..attribution import Attribution - - -class PerturbationAttribution(Attribution): - """ - Base class for perturbation-based attribution methods. - - All perturbation-based _attribution methods extend from this class. - """ - - def __init__(self, - network, - activation_fn, - perturbation_per_eval, - ): - super(PerturbationAttribution, self).__init__(network) - check_value_type("activation_fn", activation_fn, Cell) - self._activation_fn = activation_fn - check_value_type('perturbation_per_eval', perturbation_per_eval, int) - if perturbation_per_eval <= 0: - raise ValueError('Argument perturbation_per_eval should be a positive integer.') - self._perturbation_per_eval = perturbation_per_eval diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/replacement.py b/mindspore/explainer/explanation/_attribution/_perturbation/replacement.py deleted file mode 100644 index 4ddf8e72fe8..00000000000 --- a/mindspore/explainer/explanation/_attribution/_perturbation/replacement.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Modules to generate perturbations.""" - -import numpy as np -from scipy.ndimage.filters import gaussian_filter - -_Array = np.ndarray - -__all__ = [ - 'BaseReplacement', - 'Constant', - 'GaussianBlur', - 'RandomPerturb', -] - - -class BaseReplacement: - """ - Base class of generator for generating different replacement for perturbations. - - Args: - kwargs: Optional args for generating replacement. Derived class need to - add necessary arg names and default value to '_necessary_args'. - If the argument has no default value, the value should be set to - 'EMPTY' to mark the required args. Initializing an object will - check the given kwargs w.r.t '_necessary_args'. - - Raises: - ValueError: Raise when provided kwargs not contain necessary arg names with 'EMPTY' mark. - """ - _necessary_args = {} - - def __init__(self, **kwargs): - self._replace_args = self._necessary_args.copy() - for key, value in self._replace_args.items(): - if key in kwargs.keys(): - self._replace_args[key] = kwargs[key] - elif key not in kwargs.keys() and value == 'EMPTY': - raise ValueError(f"Missing keyword arg {key} for {self.__class__.__name__}.") - - def __call__(self, inputs): - raise NotImplementedError() - - -class Constant(BaseReplacement): - """Generator to provide constant-value replacement for perturbations.""" - _necessary_args = {'base_value': 'EMPTY'} - - def __call__(self, inputs: _Array) -> _Array: - replacement = np.ones_like(inputs, dtype=np.float32) - replacement *= self._replace_args['base_value'] - return replacement - - -class GaussianBlur(BaseReplacement): - """Generator to provided gaussian blurred inputs for perturbation""" - _necessary_args = {'sigma': 0.7} - - def __call__(self, inputs: _Array) -> _Array: - sigma = self._replace_args['sigma'] - replacement = gaussian_filter(inputs, sigma=sigma) - return replacement - - -class RandomPerturb(BaseReplacement): - """Generator to provide replacement by randomly adding noise.""" - _necessary_args = {'radius': 0.2} - - def __call__(self, inputs: _Array) -> _Array: - radius = self._replace_args['radius'] - outputs = inputs + (2 * np.random.rand(*inputs.shape) - 1) * radius - return outputs diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/rise.py b/mindspore/explainer/explanation/_attribution/_perturbation/rise.py deleted file mode 100644 index f14416a80f6..00000000000 --- a/mindspore/explainer/explanation/_attribution/_perturbation/rise.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""RISE.""" -import math - -import numpy as np - -from mindspore import Tensor -from mindspore.train._utils import check_value_type - -from .perturbation import PerturbationAttribution -from .... import _operators as op -from ...._utils import resize, deprecated_error - - -@deprecated_error -class RISE(PerturbationAttribution): - r""" - RISE: Randomized Input Sampling for Explanation of Black-box Model. - - RISE is a perturbation-based method that generates attribution maps by sampling on multiple random binary masks. - The original image is randomly masked, and then fed into the black-box model to get predictions. The final - attribution map is the weighted sum of these random masks, with the weights being the corresponding output on the - node of interest: - - .. math:: - attribution = \sum_{i}f_c(I\odot M_i) M_i - - For more details, please refer to the original paper via: `RISE `_. - - Args: - network (Cell): The black-box model to be explained. - activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For - single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification - tasks, `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long - as when combining this function with network, the final output is the probability of the input. - perturbation_per_eval (int, optional): Number of perturbations for each inference during inferring the - perturbed samples. Within the memory capacity, usually the larger this number is, the faster the - explanation is obtained. Default: 32. - - Inputs: - - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`. - - **targets** (Tensor, int) - The labels of interest to be explained. When `targets` is an integer, - all of the inputs will generates attribution map w.r.t this integer. When `targets` is a tensor, it - should be of shape :math:`(N, l)` (l being the number of labels for each sample) or :math:`(N,)` :math:`()`. - - Outputs: - Tensor, a 4D tensor of shape :math:`(N, l, H, W)` when targets is a tensor of shape (N, l), otherwise a tensor - of shape (N, 1, H, w), saliency maps. - - Raises: - TypeError: Be raised for any argument or input type problem. - ValueError: Be raised for any input value problem. 
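-
-    Note (added for clarity): this implementation additionally divides the
-    weighted sum by the number of sampled masks (``self._num_masks``, 6000 by
-    default), so the returned saliency is an average over masks rather than a
-    raw sum.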
- - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> import mindspore as ms - >>> from mindspore.explainer.explanation import RISE - >>> from mindspore import context - >>> - >>> context.set_context(mode=context.PYNATIVE_MODE) - >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py - >>> net = LeNet5(10, num_channel=3) - >>> # initialize RISE explainer with the pretrained model and activation function - >>> activation_fn = ms.nn.Softmax() # softmax layer is applied to transform logits to probabilities - >>> rise = RISE(net, activation_fn=activation_fn) - >>> # given an instance of RISE, saliency map can be generate - >>> inputs = ms.Tensor(np.random.rand(2, 3, 32, 32), ms.float32) - >>> # when `targets` is an integer - >>> targets = 5 - >>> saliency = rise(inputs, targets) - >>> print(saliency.shape) - (2, 1, 32, 32) - >>> # `targets` can also be a 2D tensor - >>> targets = ms.Tensor([[5], [1]], ms.int32) - >>> saliency = rise(inputs, targets) - >>> print(saliency.shape) - (2, 1, 32, 32) -""" - - def __init__(self, - network, - activation_fn, - perturbation_per_eval=32): - super(RISE, self).__init__(network, activation_fn, perturbation_per_eval) - - self._num_masks = 6000 # number of masks to be sampled - self._mask_probability = 0.5 # ratio of inputs to be masked - self._down_sample_size = 10 # the original size of binary masks - self._resize_mode = 'bilinear' # mode choice to resize the down-sized binary masks to size of the inputs - self._perturbation_mode = 'constant' # setting the perturbed pixels to a constant value - self._base_value = 0 # setting the perturbed pixels to this constant value - self._num_classes = None # placeholder of self._num_classes just for future assignment in other methods - - def _generate_masks(self, data, batch_size): - """Generate a batch of binary masks for data.""" - - height, width = data.shape[2], data.shape[3] - - mask_size = (self._down_sample_size, self._down_sample_size) - - up_size = (height + mask_size[0], width + mask_size[1]) - mask = np.random.random((batch_size, 1) + mask_size) < self._mask_probability - upsample = resize(op.Tensor(mask, data.dtype), up_size, - self._resize_mode).asnumpy() - shift_x = np.random.randint(0, mask_size[0] + 1, size=batch_size) - shift_y = np.random.randint(0, mask_size[1] + 1, size=batch_size) - - masks = [sample[:, x_i: x_i + height, y_i: y_i + width] for sample, x_i, y_i - in zip(upsample, shift_x, shift_y)] - masks = Tensor(np.array(masks), data.dtype) - return masks - - def __call__(self, inputs, targets): - """Generates attribution maps for inputs.""" - self._verify_data(inputs, targets) - height, width = inputs.shape[2], inputs.shape[3] - - if self._num_classes is None: - self._num_classes = self.network(inputs).shape[1] - - # Due to the unsupported Op of slice assignment, we use numpy array here - targets = self._unify_targets(inputs, targets) - - attr_np = np.zeros(shape=(inputs.shape[0], targets.shape[1], height, width)) - - cal_times = math.ceil(self._num_masks / self._perturbation_per_eval) - - for idx, data in enumerate(inputs): - bg_data = data * 0 + self._base_value - data = op.reshape(data, (1, -1, height, width)) - for j in range(cal_times): - bs = min(self._num_masks - j * self._perturbation_per_eval, - self._perturbation_per_eval) - - masks = self._generate_masks(data, bs) - - weights = masks * data + (1 - masks) * bg_data - weights = self._activation_fn(self.network(weights)) - while len(weights.shape) > 2: - weights = 
op.mean(weights, axis=2) - - weights = np.expand_dims(np.expand_dims(weights.asnumpy()[:, targets[idx]], 2), 3) - - attr_np[idx] += np.sum(weights * masks.asnumpy(), axis=0) - - attr_np = attr_np / self._num_masks - - return op.Tensor(attr_np, dtype=inputs.dtype) - - @staticmethod - def _verify_data(inputs, targets): - """Verify the validity of the parsed inputs.""" - check_value_type('inputs', inputs, Tensor) - if len(inputs.shape) != 4: - raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.') - check_value_type('targets', targets, (Tensor, int, tuple, list)) - if isinstance(targets, Tensor): - if len(targets.shape) > 2: - raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. ' - 'But got {}D.'.format(len(targets.shape))) - if targets.shape and len(targets) != len(inputs): - raise ValueError( - 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format( - len(inputs), len(targets))) - - @staticmethod - def _unify_targets(inputs, targets): - """To unify targets to be 2D numpy.ndarray.""" - if isinstance(targets, int): - return np.array([[targets] for _ in inputs]).astype(np.int) - if isinstance(targets, Tensor): - if not targets.shape: - return np.array([[targets.asnumpy()] for _ in inputs]).astype(np.int) - if len(targets.shape) == 1: - return np.array([[t.asnumpy()] for t in targets]).astype(np.int) - if len(targets.shape) == 2: - return np.array([t.asnumpy() for t in targets]).astype(np.int) - return targets diff --git a/mindspore/explainer/explanation/_attribution/attribution.py b/mindspore/explainer/explanation/_attribution/attribution.py deleted file mode 100644 index 9b9725cc63e..00000000000 --- a/mindspore/explainer/explanation/_attribution/attribution.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Attribution.""" - -from typing import Callable - -import mindspore as ms -import mindspore.nn as nn -from mindspore.train._utils import check_value_type - - -class Attribution: - """ - Basic class of attributing the salient score - - The explainers which explanation through attributing the relevance scores should inherit this class. - - Args: - network (nn.Cell): The black-box model to be explained. - """ - - def __init__(self, network): - check_value_type("network", network, nn.Cell) - self._network = network - self._network.set_train(False) - self._network.set_grad(False) - - @staticmethod - def _verify_network(network): - """Verify the input `network` for __init__ function.""" - if not isinstance(network, nn.Cell): - raise TypeError("The parsed `network` must be a `mindspore.nn.Cell` object.") - - __call__: Callable - """ - The explainers return the explanations by calling directly on the explanation. - Derived class should overwrite this implementations for different - algorithms. 
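-
-    A minimal derived-class sketch (illustrative only; ``my_saliency_fn`` is a
-    hypothetical helper, not part of this package):
-
-        >>> class MyExplainer(Attribution):
-        ...     def __call__(self, inputs, targets):
-        ...         self._verify_data(inputs, targets)  # reuse the base checks
-        ...         return my_saliency_fn(self._network, inputs, targets)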
- - Args: - input (ms.Tensor): Input tensor to be explained. - - Returns: - - saliency map (ms.Tensor): saliency map of the input. - """ - - @property - def network(self): - """Return the model.""" - return self._network - - @staticmethod - def _verify_data(inputs, targets): - """Verify the validity of the parsed inputs.""" - check_value_type('inputs', inputs, ms.Tensor) - if len(inputs.shape) != 4: - raise ValueError('Argument inputs must be 4D Tensor') - check_value_type('targets', targets, (ms.Tensor, int)) - if isinstance(targets, ms.Tensor): - if len(targets.shape) > 1 or (len(targets.shape) == 1 and len(targets) != len(inputs)): - raise ValueError('Argument targets must be a 1D or 0D Tensor. If it is a 1D Tensor, ' - 'it should have the same length as inputs.') - elif inputs.shape[0] != 1: - raise ValueError('If targets have type of int, batch_size of inputs should equals 1. Receive batch_size {}' - .format(inputs.shape[0])) diff --git a/mindspore/explainer/explanation/_counterfactual/__init__.py b/mindspore/explainer/explanation/_counterfactual/__init__.py deleted file mode 100644 index 4b8d0b727be..00000000000 --- a/mindspore/explainer/explanation/_counterfactual/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Counterfactual modules.""" diff --git a/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py b/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py deleted file mode 100644 index f5e6ecb1721..00000000000 --- a/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py +++ /dev/null @@ -1,1025 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================
-"""Hierarchical occlusion edit tree searcher."""
-from enum import Enum
-import copy
-import re
-import math
-
-import numpy as np
-from scipy.ndimage import gaussian_filter
-
-from mindspore import nn
-from mindspore import Tensor
-from mindspore.ops import Squeeze
-from mindspore.train._utils import check_value_type
-from mindspore.explainer._utils import deprecated_error
-
-
-AUTO_LAYER_MAX = 3  # maximum number of layers with auto settings
-AUTO_WIN_SIZE_MIN = 28  # minimum window size with auto settings
-AUTO_WIN_SIZE_DIV = 2  # denominator of window size calculations with auto settings
-AUTO_STRIDE_DIV = 5  # denominator of stride calculations with auto settings
-AUTO_MASK_GAUSSIAN_RADIUS_DIV = 25  # denominator of gaussian mask radius calculations with auto settings
-DEFAULT_THRESHOLD = 0.5  # default target prediction threshold
-DEFAULT_BATCH_SIZE = 64  # default batch size for batch inference search
-MASK_GAUSSIAN_RE = r'^gaussian:(\d+)$'  # gaussian mask string pattern
-
-# minimum length of input images' short side with auto settings
-AUTO_IMAGE_SHORT_SIDE_MIN = AUTO_WIN_SIZE_MIN * AUTO_WIN_SIZE_DIV
-
-
-@deprecated_error
-def is_valid_str_mask(mask):
-    """Check if it is a valid string mask."""
-    check_value_type('mask', mask, str)
-    match = re.match(MASK_GAUSSIAN_RE, mask)
-    return match and int(match.group(1)) > 0
-
-
-@deprecated_error
-def compile_mask(mask, image):
-    """Compile mask to a ready-to-use object."""
-    if mask is None:
-        return compile_str_mask(auto_str_mask(image), image)
-    check_value_type('mask', mask, (str, tuple, float, np.ndarray))
-    if isinstance(mask, str):
-        return compile_str_mask(mask, image)
-
-    if isinstance(mask, tuple):
-        _check_iterable_type('mask', mask, tuple, float)
-    elif isinstance(mask, np.ndarray):
-        if len(image.shape) == 4 and len(mask.shape) == 3:
-            mask = np.expand_dims(mask, axis=0)
-        elif len(image.shape) == 3 and len(mask.shape) == 4 and mask.shape[0] == 1:
-            mask = mask.squeeze(0)
-        if image.shape != mask.shape:
-            raise ValueError("Image and mask do not match in shape.")
-    return mask
-
-
-@deprecated_error
-def auto_str_mask(image):
-    """Generate an auto string mask for the image."""
-    check_value_type('image', image, np.ndarray)
-    short_side = np.min(image.shape[-2:])
-    radius = int(round(short_side/AUTO_MASK_GAUSSIAN_RADIUS_DIV))
-    if radius == 0:
-        raise ValueError(f"Input image's short side: {short_side} is too small for auto mask, "
-                         f"at least {AUTO_MASK_GAUSSIAN_RADIUS_DIV} pixels are required.")
-    return f'gaussian:{radius}'
-
-
-@deprecated_error
-def compile_str_mask(mask, image):
-    """Convert a string mask to numpy.ndarray."""
-    check_value_type('mask', mask, str)
-    check_value_type('image', image, np.ndarray)
-    match = re.match(MASK_GAUSSIAN_RE, mask)
-    if match:
-        radius = int(match.group(1))
-        if radius > 0:
-            sigma = [0] * len(image.shape)
-            sigma[-2] = radius
-            sigma[-1] = radius
-            return gaussian_filter(image, sigma=sigma, mode='nearest')
-    raise ValueError(f"Invalid string mask: '{mask}'.")
-
-
-@deprecated_error
-class EditStep:
-    """
-    Edit step that describes a box region, also represents an edit tree.
-
-    Args:
-        layer (int): Layer number, -1 is the root layer, 0 or above is a normal edit layer.
-        box (tuple[int, int, int, int]): Tuple of x, y, width, height.
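-
-    Example (an illustrative sketch added by the editor, not part of the
-    original module):
-
-        >>> root = EditStep(-1, (0, 0, 224, 224))             # root covers the image
-        >>> root.add_child(EditStep(0, (0, 0, 112, 112)))     # top-left quadrant
-        >>> root.add_child(EditStep(0, (112, 112, 112, 112))) # bottom-right quadrant
-        >>> len(root.leaf_steps), root.max_layer
-        (2, 0)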
- """ - def __init__(self, layer, box): - self.layer = layer - self.box = box - self.network_output = 0 - self.step_change = 0 - self.children = None - - @property - def x(self): - """X-coordinate of the box.""" - return self.box[0] - - @property - def y(self): - """Y-coordinate of the box.""" - return self.box[1] - - @property - def width(self): - """Width of the box.""" - return self.box[2] - - @property - def height(self): - """Height of the box.""" - return self.box[3] - - @property - def is_leaf(self): - """Returns True if no child edit step.""" - return not self.children - - @property - def leaf_steps(self): - """Returns all leaf edit steps in the tree.""" - if self.is_leaf: - return [self] - steps = [] - for child in self.children: - steps.extend(child.leaf_steps) - return steps - - @property - def max_layer(self): - """Maximum layer number in the edit tree.""" - if self.is_leaf: - return self.layer - layer = self.layer - for child in self.children: - child_max_layer = child.max_layer - if child_max_layer > layer: - layer = child_max_layer - return layer - - def add_child(self, child): - """Add a child edit step.""" - if self.children is None: - self.children = [child] - else: - self.children.append(child) - - def remove_all_children(self): - """Remove all child steps.""" - self.children = None - - def get_layer_or_leaf_steps(self, layer): - """Get all edit steps of the layer and all leaf edit steps above the layer.""" - if self.layer == layer or (self.layer < layer and self.is_leaf): - return [self] - steps = [] - if self.layer < layer and self.children: - for child in self.children: - steps.extend(child.get_layer_or_leaf_steps(layer)) - return steps - - def get_layer_steps(self, layer): - """Get all edit steps of the layer.""" - if self.layer == layer: - return [self] - steps = [] - if self.layer < layer and self.children: - for child in self.children: - steps.extend(child.get_layer_steps(layer)) - return steps - - @classmethod - def apply(cls, - image, - mask, - edit_steps, - by_masking=False, - inplace=False): - """ - Apply edit steps. - - Args: - image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format. - mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, type can be - str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9. - tuple[float, float, float]: RGB solid color mask, - float: Grey scale solid color mask. - numpy.ndarray: Image mask in CHW or NCHW(N=1) format. - edit_steps (list[EditStep], optional): Edit steps to be applied. - by_masking (bool): Whether it is masking mode. - inplace (bool): Whether the modification is going to take place in the input image tensor. False to - construct a new image tensor as result. - - Returns: - numpy.ndarray, the result image tensor. - - Raises: - TypeError: Be raised for any argument or data type problem. - ValueError: Be raised for any argument or data value problem. - """ - if by_masking: - return cls.apply_masking(image, mask, edit_steps, inplace) - return cls.apply_unmasking(image, mask, edit_steps, inplace) - - @classmethod - def apply_masking(cls, - image, - mask, - edit_steps, - inplace=False): - """ - Apply edit steps in masking mode. - - Args: - image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format. - mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, type can be - str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9. - tuple[float, float, float]: RGB solid color mask, - float: Grey scale solid color mask. 
-                numpy.ndarray: Image mask in CHW or NCHW(N=1) format.
-            edit_steps (list[EditStep], optional): Edit steps to be applied.
-            inplace (bool): Whether the modification takes place in the input image tensor. If False,
-                a new image tensor is constructed as the result.
-
-        Returns:
-            numpy.ndarray, the result image tensor.
-
-        Raises:
-            TypeError: Raised for any argument or data type problem.
-            ValueError: Raised for any argument or data value problem.
-        """
-
-        cls._apply_check_args(image, mask, edit_steps)
-
-        mask = compile_mask(mask, image)
-
-        background = image if inplace else np.copy(image)
-
-        if not edit_steps:
-            return background
-
-        for step in edit_steps:
-
-            x_max, y_max = cls._get_step_xy_max(step, background.shape[-1], background.shape[-2])
-
-            if x_max <= step.x or y_max <= step.y:
-                continue
-
-            if isinstance(mask, np.ndarray):
-                background[..., step.y:y_max, step.x:x_max] = mask[..., step.y:y_max, step.x:x_max]
-            else:
-                if isinstance(mask, (int, float)):
-                    mask = (mask, mask, mask)
-                for c in range(3):
-                    background[..., c, step.y:y_max, step.x:x_max] = mask[c]
-        return background
-
-    @classmethod
-    def apply_unmasking(cls,
-                        image,
-                        mask,
-                        edit_steps,
-                        inplace=False):
-        """
-        Apply edit steps in unmasking mode.
-
-        Args:
-            image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.
-            mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, the type can be
-                str: String mask, e.g. 'gaussian:9' - Gaussian blur with a radius of 9.
-                tuple[float, float, float]: RGB solid color mask.
-                float: Grey scale solid color mask.
-                numpy.ndarray: Image mask in CHW or NCHW(N=1) format.
-            edit_steps (list[EditStep]): Edit steps to be applied.
-            inplace (bool): Whether the modification takes place in the input mask tensor. If False,
-                a new image tensor is constructed as the result.
-
-        Returns:
-            numpy.ndarray, the result image tensor.
-
-        Raises:
-            TypeError: Raised for any argument or data type problem.
-            ValueError: Raised for any argument or data value problem.
-        """
-
-        cls._apply_check_args(image, mask, edit_steps)
-
-        mask = compile_mask(mask, image)
-
-        if isinstance(mask, np.ndarray):
-            if inplace:
-                background = mask
-            else:
-                background = np.copy(mask)
-        else:
-            if inplace:
-                raise ValueError('Inplace cannot be True when mask is not a numpy.ndarray.')
-
-            background = np.zeros_like(image)
-            if isinstance(mask, (int, float)):
-                background.fill(mask)
-            else:
-                for c in range(3):
-                    background[..., c, :, :] = mask[c]
-
-        if not edit_steps:
-            return background
-
-        for step in edit_steps:
-
-            x_max, y_max = cls._get_step_xy_max(step, background.shape[-1], background.shape[-2])
-
-            if x_max <= step.x or y_max <= step.y:
-                continue
-
-            background[..., step.y:y_max, step.x:x_max] = image[..., step.y:y_max, step.x:x_max]
-
-        return background
-
-    @staticmethod
-    def _apply_check_args(image, mask, edit_steps):
-        """
-        Check arguments for applying edit steps.
-
-        Args:
-            image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.
-            mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, the type can be
-                str: String mask, e.g. 'gaussian:9' - Gaussian blur with a radius of 9.
-                tuple[float, float, float]: RGB solid color mask.
-                float: Grey scale solid color mask.
-                numpy.ndarray: Image mask in CHW or NCHW(N=1) format.
-            edit_steps (list[EditStep], optional): Edit steps to be applied.
-
-        Raises:
-            TypeError: Raised for any argument or data type problem.
-            ValueError: Raised for any argument or data value problem.
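-
-        For example, 'gaussian:9', (0.0, 0.0, 0.0), 0.5 and a numpy.ndarray of the
-        image's shape are all accepted mask forms.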
- """ - check_value_type('image', image, np.ndarray) - check_value_type('mask', mask, (str, tuple, float, np.ndarray)) - if isinstance(mask, tuple): - _check_iterable_type('mask', mask, tuple, float) - - if edit_steps is not None: - _check_iterable_type('edit_steps', edit_steps, (tuple, list), EditStep) - - @staticmethod - def _get_step_xy_max(step, x_limit, y_limit): - """Get the step x and y max. position.""" - x_max = step.x + step.width - y_max = step.y + step.height - - if x_max > x_limit: - x_max = x_limit - - if y_max > y_limit: - y_max = y_limit - return x_max, y_max - - -class NoValidResultError(RuntimeError): - """Error for no edit step layer's network output meet the threshold.""" - - -class OriginalOutputError(RuntimeError): - """Error for network output of the original image is not strictly larger than the threshold.""" - - -@deprecated_error -class Searcher: - """ - Edit step searcher. - - Args: - network (Cell): Image tensor in CHW or NCHW(N=1) format. - win_sizes (Union(list[int], optional): Moving square window size (length of side) of layers, - None means by auto calcuation. - strides (Union(list[int], optional): Stride of layers, None means by auto calcuation. - threshold (float): Threshold network output value of the target class. - by_masking (bool): Whether it is masking mode. - - Raises: - ValueError: Be raised for any data or settings' value problem. - TypeError: Be raised for any data or settings' type problem. - RuntimeError: Be raised if this function was invoked before. - - Supported Platforms: - ``Ascend`` ``GPU`` - """ - - def __init__(self, - network, - win_sizes=None, - strides=None, - threshold=DEFAULT_THRESHOLD, - by_masking=False): - - check_value_type('network', network, nn.Cell) - - if win_sizes is not None: - _check_iterable_type('win_sizes', win_sizes, list, int) - if not win_sizes: - raise ValueError('Argument win_sizes is empty.') - - for i in range(1, len(win_sizes)): - if win_sizes[i] >= win_sizes[i-1]: - raise ValueError('Argument win_sizes is not strictly descending.') - - if win_sizes[-1] <= 0: - raise ValueError('Argument win_sizes has non-positive number.') - elif strides is not None: - raise ValueError('Argument win_sizes cannot be None if strides is not None.') - - if strides is not None: - _check_iterable_type('strides', strides, list, int) - for i in range(1, len(strides)): - if strides[i] >= strides[i-1]: - raise ValueError('Argument win_sizes is not strictly descending.') - - if strides[-1] <= 0: - raise ValueError('Argument strides has non-positive number.') - - if len(strides) != len(win_sizes): - raise ValueError('Length of strides and win_sizes is not equal.') - elif win_sizes is not None: - raise ValueError('Argument strides cannot be None if win_sizes is not None.') - - self._network = copy.deepcopy(network) - self._compiled_mask = None - self._threshold = threshold - self._win_sizes = copy.copy(win_sizes) if win_sizes else None - self._strides = copy.copy(strides) if strides else None - self._by_masking = by_masking - - @property - def network(self): - """Get the network.""" - return self._network - - @property - def by_masking(self): - """Check if it is masking mode.""" - return self._by_masking - - @property - def threshold(self): - """The network output threshold to stop the search.""" - return self._threshold - - @property - def win_sizes(self): - """Windows sizes in pixels.""" - return self._win_sizes - - @property - def strides(self): - """Strides in pixels.""" - return self._strides - - @property - def compiled_mask(self): 
- """The compiled mask after a successful search() call.""" - return self._compiled_mask - - def search(self, image, class_idx, mask=None): - """ - Search smallest sufficient/destruction region on an image. - - Args: - image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format. - class_idx (int): Target class index. - mask (Union[str, tuple[float, float, float], float], optional): The mask, type can be - str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9. - tuple[float, float, float]: RGB solid color mask, - float: Grey scale solid color mask. - None: By auto calculation. - - Returns: - tuple[EditStep, list[float]], the root edit step and network output of each layer after applied the - layer steps. - - Raises: - TypeError: Be raised for any argument or data type problem. - ValueError: Be raised for any argument or data value problem. - NoValidResultError: Be raised if no valid result was found. - OriginalOutputError: Be raised if network output of the original image is not strictly larger than - the threshold. - """ - check_value_type('image', image, (Tensor, np.ndarray)) - - if isinstance(image, Tensor): - image = image.asnumpy() - - if len(image.shape) == 4: - if image.shape[0] != 1: - raise ValueError("Argument image's batch size is not 1.") - elif len(image.shape) == 3: - image = np.expand_dims(image, axis=0) - else: - raise ValueError("Argument image is not in CHW or NCHW(N=1) format.") - - check_value_type('class_idx', class_idx, int) - - if class_idx < 0: - raise ValueError("Argument class_idx is less then zero.") - - self._compiled_mask = compile_mask(mask, image) - - short_side = np.min(image.shape[-2:]) - if self._win_sizes is None: - win_sizes, strides = self._auto_win_sizes_strides(short_side) - else: - win_sizes, strides = self._win_sizes, self._strides - - if short_side <= win_sizes[0]: - raise ValueError(f"Input image's short side is shorter then or " - f"equals to the first window size:{win_sizes[0]}.") - - self._network.set_train(False) - - # the search result will be store as a edit tree that attached to the root step. - root_step = EditStep(-1, (0, 0, image.shape[-1], image.shape[-2])) - root_job = _SearchJob(by_masking=self._by_masking, - class_idx=class_idx, - win_sizes=win_sizes, - strides=strides, - layer=0, - search_field=root_step.box, - pre_edit_steps=None, - parent_step=root_step) - self._process_root_job(image, root_job) - return self._touch_result(image, class_idx, root_step) - - def _touch_result(self, image, class_idx, root_step): - """ - Final treatment to the search result. - - Args: - image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format. - class_idx (int): Target class index. - root_step (EditStep): The searched root step. - - Returns: - tuple[EditStep, list[float]], the root edit step and network output of each layer after applied the - layer steps. - - Raises: - NoValidResultError: Be raised if no valid result was found. 
- """ - # the leaf layer's network output may not meet the threshold, - # we have to cutoff the unqualified layers - layer_count = root_step.max_layer + 1 - if layer_count == 0: - raise NoValidResultError("No edit step layer was found.") - - # gather the network output of each layer - layer_outputs = [None] * layer_count - for layer in range(layer_count): - steps = root_step.get_layer_or_leaf_steps(layer) - if not steps: - continue - masked_image = EditStep.apply(image, self._compiled_mask, steps, by_masking=self._by_masking) - output = self._network(Tensor(masked_image)) - output = output[0, class_idx].asnumpy().item() - layer_outputs[layer] = output - - # determine which layer we have to cutoff - cutoff_layer = None - for layer in reversed(range(layer_count)): - if layer_outputs[layer] is not None and self._is_threshold_met(layer_outputs[layer]): - cutoff_layer = layer - break - - if cutoff_layer is None or root_step.is_leaf: - raise NoValidResultError(f"No edit step layer's network output meet the threshold: {self._threshold}.") - - # cutoff the layer by removing all children of the layer's steps. - steps = root_step.get_layer_steps(cutoff_layer) - for step in steps: - step.remove_all_children() - layer_outputs = layer_outputs[:cutoff_layer + 1] - - return root_step, layer_outputs - - def _process_root_job(self, sample_input, root_job): - """ - Process job queue. - - Args: - sample_input (numpy.ndarray): Image tensor in NCHW(N=1) format. - root_job (_SearchJob): Root search job. - """ - job_queue = [root_job] - while job_queue: - job = job_queue.pop(0) - sub_job_queue = [] - job_edit_steps, stop_reason = self._process_job(job, sample_input, sub_job_queue) - - if stop_reason in (self._StopReason.THRESHOLD_MET, self._StopReason.STEP_CHANGE_MET): - for step in job_edit_steps: - job.parent_step.add_child(step) - job_queue.extend(sub_job_queue) - - def _prepare_job(self, job, sample_input): - """ - Prepare a job for process. - - Args: - job (_SearchJob): Search job to be processed. - sample_input (numpy.ndarray): Image tensor in NCHW(N=1) format. - - Returns: - numpy.ndarray, the image tensor workpiece. - - Raises: - OriginalOutputError: Be raised if network output of the original image is not strictly larger than the - threshold. - """ - # make the network output with the original image is strictly larger than the threshold - if job.layer == 0: - original_output = self._network(Tensor(sample_input))[0, job.class_idx].asnumpy().item() - if original_output <= self._threshold: - raise OriginalOutputError(f'The original output is not strictly larger the threshold: ' - f'{self._threshold}') - - # applying the pre-edit steps from the parent steps - if job.pre_edit_steps: - # use the latest leaf steps to increase the accuracy - leaf_steps = [] - for step in job.pre_edit_steps: - leaf_steps.extend(step.leaf_steps) - pre_edit_steps = leaf_steps - else: - pre_edit_steps = None - workpiece = EditStep.apply(sample_input, - self._compiled_mask, - pre_edit_steps, - self._by_masking) - - job.on_start(sample_input, workpiece, self._compiled_mask, self._network) - return workpiece - - def _process_job(self, job, sample_input, job_queue): - """ - Process a job. - - Args: - job (_SearchJob): Search job to be processed. - sample_input (numpy.ndarray): Image tensor in NCHW(N=1) format. - job_queue (list[_SearchJob]): Job queue. - - Returns: - tuple[list[EditStep], _StopReason], result edit stop and the stop reason. 
-
-        Raises:
-            OriginalOutputError: Raised if the network output of the original image is not strictly larger
-                than the threshold.
-        """
-        workpiece = self._prepare_job(job, sample_input)
-
-        start_output = self._network(Tensor(workpiece))[0, job.class_idx].asnumpy().item()
-        last_output = start_output
-        edit_steps = []
-        # greedy search loop
-        while True:
-
-            if self._is_threshold_met(last_output):
-                return edit_steps, self._StopReason.THRESHOLD_MET
-
-            try:
-                best_edit = job.find_best_edit()
-            except _NoNewStepError:
-                return edit_steps, self._StopReason.NO_NEW_STEP
-            except _RepeatedStepError:
-                return edit_steps, self._StopReason.REPEATED_STEP
-
-            best_edit.step_change = best_edit.network_output - last_output
-
-            if job.layer < job.layer_count - 1 and self._is_greedy(best_edit.step_change):
-                # create the next layer search job if the new edit step is valid and the
-                # final layer has not yet been reached
-                if job.pre_edit_steps:
-                    pre_edit_steps = list(job.pre_edit_steps)
-                    pre_edit_steps.extend(edit_steps)
-                else:
-                    pre_edit_steps = list(edit_steps)
-
-                sub_job = job.create_sub_job(best_edit, pre_edit_steps)
-                job_queue.append(sub_job)
-
-            edit_steps.append(best_edit)
-
-            if job.layer > 0:
-                # stop if the step change meets the parent step change, but only after layer 0
-                change = best_edit.network_output - start_output
-                if self._is_step_change_met(job.parent_step.step_change, change):
-                    return edit_steps, self._StopReason.STEP_CHANGE_MET
-
-            last_output = best_edit.network_output
-
-    def _is_threshold_met(self, network_output):
-        """Check if the threshold was met."""
-        if self._by_masking:
-            return network_output <= self._threshold
-        return network_output >= self._threshold
-
-    def _is_step_change_met(self, target, step_change):
-        """Check if the change target was met."""
-        if self._by_masking:
-            return step_change <= target
-        return step_change >= target
-
-    def _is_greedy(self, step_change):
-        """Check if it is a greedy step."""
-        if self._by_masking:
-            return step_change < 0
-        return step_change > 0
-
-    @classmethod
-    def _auto_win_sizes_strides(cls, short_side):
-        """
-        Calculate auto window sizes and strides.
-
-        Args:
-            short_side (int): Length of the image's short side.
-
-        Returns:
-            tuple[list[int], list[int]], window sizes and strides.
-        """
-        win_sizes = []
-        strides = []
-        cur_len = int(short_side/AUTO_WIN_SIZE_DIV)
-        while len(win_sizes) < AUTO_LAYER_MAX and cur_len >= AUTO_WIN_SIZE_MIN:
-            stride = int(cur_len/AUTO_STRIDE_DIV)
-            if stride <= 0:
-                break
-            win_sizes.append(cur_len)
-            strides.append(stride)
-            cur_len = int(cur_len/AUTO_WIN_SIZE_DIV)
-        if not win_sizes:
-            raise ValueError(f"Image's short side is less than {AUTO_IMAGE_SHORT_SIDE_MIN}, "
-                             f"unable to calculate auto settings.")
-        return win_sizes, strides
-
-    class _StopReason(Enum):
-        """Stop reason of a search job."""
-        THRESHOLD_MET = 0  # threshold was met.
-        STEP_CHANGE_MET = 1  # parent step change was met.
-        NO_NEW_STEP = 2  # no new step was found.
-        REPEATED_STEP = 3  # repeated step was found.
-
-
-def _check_iterable_type(arg_name, arg_value, container_type, elem_types):
-    """Check an iterable argument's data type."""
-    check_value_type(arg_name, arg_value, container_type)
-    for elem in arg_value:
-        check_value_type(arg_name + ' element', elem, elem_types)
-
-
-class _NoNewStepError(Exception):
-    """Error raised when no new step was found."""
-
-
-class _RepeatedStepError(Exception):
-    """Error raised when a repeated step was found."""
-
-
-class _SearchJob:
-    """
-    Search job.
-
-    Args:
-        by_masking (bool): Whether it is masking mode.
-        class_idx (int): Target class index.
-        win_sizes (list[int]): Moving square window sizes (side length) of the layers.
-        strides (list[int]): Strides of the layers.
-        layer (int): Layer number.
-        search_field (tuple[int, int, int, int]): Search field in x, y, width, height format.
-        pre_edit_steps (list[EditStep], optional): Edit steps to be applied before searching.
-        parent_step (EditStep): Parent edit step.
-        batch_size (int): Batch size for batched inferences.
-    """
-
-    def __init__(self,
-                 by_masking,
-                 class_idx,
-                 win_sizes,
-                 strides,
-                 layer,
-                 search_field,
-                 pre_edit_steps,
-                 parent_step,
-                 batch_size=DEFAULT_BATCH_SIZE):
-
-        if layer >= len(win_sizes):
-            raise ValueError('Layer is larger than the number of window sizes.')
-
-        self.by_masking = by_masking
-        self.class_idx = class_idx
-        self.win_sizes = win_sizes
-        self.strides = strides
-        self.layer = layer
-        self.search_field = search_field
-        self.pre_edit_steps = pre_edit_steps
-        self.parent_step = parent_step
-        self.batch_size = batch_size
-        self.network = None
-        self.mask = None
-        self.original_input = None
-
-        self._workpiece = None
-        self._found_best_edits = None
-        self._found_uvs = None
-        self._u_pixels = None
-        self._v_pixels = None
-
-    @property
-    def layer_count(self):
-        """Number of layers."""
-        return len(self.win_sizes)
-
-    def on_start(self, original_input, workpiece, mask, network):
-        """
-        Notification of the start of the search job.
-
-        Args:
-            original_input (numpy.ndarray): The original image tensor in CHW or NCHW(N=1) format.
-            workpiece (numpy.ndarray): The intermediate image tensor in CHW or NCHW(N=1) format.
-            mask (Union[tuple[float, float, float], float, numpy.ndarray]): The mask, the type can be
-                tuple[float, float, float]: RGB solid color mask.
-                float: Grey scale solid color mask.
-                numpy.ndarray: Image mask, has the same format as original_input.
-            network (nn.Cell): Classification network.
-        """
-        self.original_input = original_input
-        self.mask = mask
-        self.network = network
-
-        self._workpiece = workpiece
-        self._found_best_edits = []
-        self._found_uvs = []
-        self._u_pixels = self._calc_uv_pixels(self.search_field[0], self.search_field[2])
-        self._v_pixels = self._calc_uv_pixels(self.search_field[1], self.search_field[3])
-
-    def create_sub_job(self, parent_step, pre_edit_steps):
-        """Create the next layer search job."""
-        return self.__class__(by_masking=self.by_masking,
-                              class_idx=self.class_idx,
-                              win_sizes=self.win_sizes,
-                              strides=self.strides,
-                              layer=self.layer + 1,
-                              search_field=copy.copy(parent_step.box),
-                              pre_edit_steps=pre_edit_steps,
-                              parent_step=parent_step,
-                              batch_size=self.batch_size)
-
-    def find_best_edit(self):
-        """
-        Find the next best edit step.
-
-        Returns:
-            EditStep, the next best edit step.
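-
-        Note:
-            Candidate windows are evaluated in batches of batch_size; the best
-            candidate is the one that minimizes (masking mode) or maximizes
-            (unmasking mode) the network output of the target class.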
- """ - workpiece = self._workpiece - if len(workpiece.shape) == 3: - workpiece = np.expand_dims(workpiece, axis=0) - - # generate input tensors with shifted masked/unmasked region and pack into a batch - best_new_workpiece = None - best_output = None - best_edit = None - best_uv = None - batch = np.repeat(workpiece, repeats=self.batch_size, axis=0) - batch_uvs = [] - batch_steps = [] - batch_i = 0 - win_size = self.win_sizes[self.layer] - for u, x in enumerate(self._u_pixels): - for v, y in enumerate(self._v_pixels): - if (u, v) in self._found_uvs: - continue - - edit_step = EditStep(self.layer, (x, y, win_size, win_size)) - - if self.by_masking: - EditStep.apply(batch[batch_i], - self.mask, - [edit_step], - self.by_masking, - inplace=True) - else: - EditStep.apply(self.original_input, - batch[batch_i], - [edit_step], - self.by_masking, - inplace=True) - - batch_i += 1 - batch_uvs.append((u, v)) - batch_steps.append(edit_step) - if batch_i != self.batch_size: - continue - - # the batch is full, inference and empty it - updated = self._update_best(batch, batch_uvs, batch_steps, best_output) - if updated: - best_output, best_uv, best_edit, best_new_workpiece = updated - - batch = np.repeat(workpiece, repeats=self.batch_size, axis=0) - batch_uvs = [] - batch_i = 0 - - if batch_i > 0: - # don't forget the last half full batch - updated = self._update_best(batch, batch_uvs, batch_steps, best_output, batch_i) - if updated: - best_output, best_uv, best_edit, best_new_workpiece = updated - - if best_edit is None: - raise _NoNewStepError - - if best_uv in self._found_uvs: - raise _RepeatedStepError - - self._found_uvs.append(best_uv) - self._found_best_edits.append(best_edit) - best_edit.network_output = best_output - - # continue on the best workpiece in the next function call - self._workpiece = best_new_workpiece - - return best_edit - - def _update_best(self, batch, batch_uvs, batch_steps, best_output, batch_i=None): - """Update the best edit step.""" - squeeze = Squeeze() - batch_output = self.network(Tensor(batch)) - batch_output = batch_output[:, self.class_idx] - if len(batch_output.shape) > 1: - batch_output = squeeze(batch_output) - - aggregation = np.argmin if self.by_masking else np.argmax - if batch_i is None: - batch_best_i = aggregation(batch_output.asnumpy()) - else: - batch_best_i = aggregation(batch_output.asnumpy()[:batch_i, ...]) - batch_best_output = batch_output[int(batch_best_i)].asnumpy().item() - - if best_output is None or self._is_output0_better(batch_best_output, best_output): - best_output = batch_best_output - best_uv = batch_uvs[batch_best_i] - best_edit = batch_steps[batch_best_i] - best_new_workpiece = batch[batch_best_i] - return best_output, best_uv, best_edit, best_new_workpiece - return None - - def _is_output0_better(self, output0, output1): - """Check if the network output0 is better.""" - if self.by_masking: - return output0 < output1 - return output0 > output1 - - def _calc_uv_pixels(self, begin, length): - """ - Calculate the pixel coordinate of shifts. - - Args: - begin (int): The beginning pixel coordinate of search field. - length (int): The length of search field. - - Returns: - list[int], pixel coordinate of shifts. 
- """ - win_size = self.win_sizes[self.layer] - stride = self.strides[self.layer] - shift_count = self._calc_shift_count(length, win_size, stride) - pixels = [0] * shift_count - for i in range(shift_count): - if i == shift_count - 1: - pixels[i] = begin + length - win_size - else: - pixels[i] = begin + i*stride - return pixels - - @staticmethod - def _calc_shift_count(length, win_size, stride): - """ - Calculate the number of shifts in search field. - - Args: - length (int): The length of search field. - win_size (int): The length of sides of moving window. - stride (int): The stride. - - Returns: - int, number of shifts. - """ - if length <= win_size or win_size < stride or stride <= 0: - raise ValueError("Invalid length, win_size or stride.") - count = int(math.ceil((length - win_size)/stride)) - if (count - 1)*stride + win_size < length: - return count + 1 - return count diff --git a/mindspore/train/summary/_explain_adapter.py b/mindspore/train/summary/_explain_adapter.py deleted file mode 100644 index 8812f82a9af..00000000000 --- a/mindspore/train/summary/_explain_adapter.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Generate the explain event which conform to proto format.""" -import time - -from ..summary_pb2 import Event, Explain - - -def check_explain_proto(explain): - """ - Package the explain event. - - Args: - explain (Explain): The object of summary_pb2.Explain. - """ - if not isinstance(explain, Explain): - raise TypeError(f'Plugin explainer expects a {Explain.__name__} value.') - - if not explain.image_path and not explain.inference and not explain.metadata.label and not explain.benchmark: - raise ValueError('One of metadata, image path, inference or benchmark has to be filled in.') - - -def package_explain_event(explain_str): - """ - Package the explain event. - - Args: - explain_str (string): The serialize string of summary_pb2.Explain. - - Returns: - Event, event object. 
- """ - event = Event() - event.wall_time = time.time() - event.explain.ParseFromString(explain_str) - return event.SerializeToString() diff --git a/mindspore/train/summary/_writer_pool.py b/mindspore/train/summary/_writer_pool.py index 8303f6dc46b..c48004a9970 100644 --- a/mindspore/train/summary/_writer_pool.py +++ b/mindspore/train/summary/_writer_pool.py @@ -26,8 +26,7 @@ from mindspore.train.summary.enums import PluginEnum, WriterPluginEnum from ._lineage_adapter import serialize_to_lineage_event from ._summary_adapter import package_graph_event, package_summary_event -from ._explain_adapter import package_explain_event -from .writer import LineageWriter, SummaryWriter, ExplainWriter, ExportWriter +from .writer import LineageWriter, SummaryWriter, ExportWriter try: from multiprocessing import get_context @@ -50,8 +49,6 @@ def _pack_data(datadict, wall_time): PluginEnum.IMAGE.value): summaries.append({'_type': plugin.title(), 'name': data.get('tag'), 'data': data.get('value')}) step = data.get('step') - elif plugin == PluginEnum.EXPLAINER.value: - result.append([plugin, package_explain_event(data.get('value'))]) if 'export_option' in data: result.append([WriterPluginEnum.EXPORTER.value, data]) @@ -85,6 +82,7 @@ class WriterPool(ctx.Process): self.start() def run(self): + """Run the writer pool.""" # Environment variables are used to specify a maximum number of OpenBLAS threads: # In ubuntu(GPU) environment, numpy will use too many threads for computing, # it may affect the start of the summary process. @@ -138,8 +136,6 @@ class WriterPool(ctx.Process): self._writers_.append(SummaryWriter(filepath, self._max_file_size)) elif plugin == WriterPluginEnum.LINEAGE.value: self._writers_.append(LineageWriter(filepath, self._max_file_size)) - elif plugin == WriterPluginEnum.EXPLAINER.value: - self._writers_.append(ExplainWriter(filepath, self._max_file_size)) elif plugin == WriterPluginEnum.EXPORTER.value: self._writers_.append(ExportWriter(filepath, self._max_file_size)) return self._writers_ diff --git a/mindspore/train/summary/summary_record.py b/mindspore/train/summary/summary_record.py index 552ed7dd3fc..7bb4cfdf495 100644 --- a/mindspore/train/summary/summary_record.py +++ b/mindspore/train/summary/summary_record.py @@ -27,7 +27,6 @@ from ..._c_expression import Tensor, security from ..._checkparam import Validator from .._utils import _check_lineage_value, _check_to_numpy, _make_directory, check_value_type from ._summary_adapter import get_event_file_name, package_graph_event -from ._explain_adapter import check_explain_proto from ._writer_pool import WriterPool # for the moment, this lock is for caution's sake, @@ -190,7 +189,6 @@ class SummaryRecord: filename_dict = dict(summary=self.file_info.get('file_name'), lineage=get_event_file_name(file_prefix, '_lineage', time_second), - explainer=get_event_file_name(file_prefix, '_explain', time_second), exporter=export_dir) self._event_writer = WriterPool(log_dir, max_file_size, @@ -253,8 +251,6 @@ class SummaryRecord: see mindspore/ccsrc/lineage.proto. - The data type of value should be a 'UserDefinedInfo' object when the plugin is 'custom_lineage_data', see mindspore/ccsrc/lineage.proto. - - The data type of value should be a 'Explain' object when the plugin is 'explainer', - see mindspore/ccsrc/summary.proto. Raises: ValueError: If the parameter value is invalid. TypeError: If the parameter type is error. 
@@ -287,9 +283,6 @@ class SummaryRecord:
         elif plugin == 'graph':
             package_graph_event(value)
             self._data_pool[plugin].append(dict(value=value))
-        elif plugin == 'explainer':
-            check_explain_proto(value)
-            self._data_pool[plugin].append(dict(value=value.SerializeToString()))
         else:
             raise ValueError(f'No such plugin of {repr(plugin)}')
 
diff --git a/mindspore/train/summary/writer.py b/mindspore/train/summary/writer.py
index d6e445e9fdb..428a190a3d4 100644
--- a/mindspore/train/summary/writer.py
+++ b/mindspore/train/summary/writer.py
@@ -112,15 +112,6 @@ class LineageWriter(BaseWriter):
         super().write(plugin, data)
 
 
-class ExplainWriter(BaseWriter):
-    """ExplainWriter for writing explain data."""
-
-    def write(self, plugin, data):
-        """Write data to file."""
-        if plugin == WriterPluginEnum.EXPLAINER.value:
-            super().write(plugin, data)
-
-
 class ExportWriter(BaseWriter):
     """ExportWriter for export data."""
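
For reference, the removed ExplainWriter used the same one-plugin filtering pattern that the remaining writers keep. A minimal illustrative sketch of that pattern (PluginFilterWriter is a hypothetical name; the BaseWriter constructor arguments mirror the calls in _writer_pool.py above):

    class PluginFilterWriter(BaseWriter):
        """Illustrative stub: forward only records that belong to one plugin."""

        def __init__(self, plugin_value, filepath, max_file_size):
            super().__init__(filepath, max_file_size)
            self._plugin_value = plugin_value  # e.g. WriterPluginEnum.EXPORTER.value

        def write(self, plugin, data):
            """Write data to file only when the plugin matches."""
            if plugin == self._plugin_value:
                super().write(plugin, data)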