fixbug: NaiveFaithfulness NaN warning and system errors when benchmark num_labels and the model output dimension do not match.

This commit is contained in:
lixiaohui 2020-11-27 19:30:27 +08:00
parent 81ff2d556a
commit f6134bd1da
2 changed files with 11 additions and 5 deletions
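Background: np.corrcoef normalizes each row by its standard deviation, so a constant (uniform) saliency map or constant predictions cause a division by zero, a RuntimeWarning, and NaN correlations. A minimal reproduction of the warning this commit guards against (array values are illustrative):

import numpy as np

feature_importance = np.ones((1, 4))            # uniform saliency scores: zero variance
predictions = np.array([[0.2, 0.4, 0.1, 0.3]])  # illustrative model outputs
corr = np.corrcoef(feature_importance, predictions)
print(corr)  # rows/columns for the constant input are NaN, with a RuntimeWarning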


@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """Faithfulness."""
+from decimal import Decimal
 from typing import Callable, Optional, Union
 import numpy as np
@@ -147,8 +148,8 @@ class NaiveFaithfulness(_FaithfulnessHelper):
             - faithfulness (np.ndarray): faithfulness score
         """
-        if not np.count_nonzero(saliency):
-            log.warning("The saliency map is zero everywhere. The correlation will be set to zero.")
+        if Decimal(str(saliency.max())) == Decimal(str(saliency.min())):
+            log.warning("The saliency map is uniform everywhere. The correlation will be set to zero.")
             correlation = 0
             return np.array([correlation], np.float)
@@ -163,6 +164,11 @@ class NaiveFaithfulness(_FaithfulnessHelper):
         predictions = model(perturbations)[:, targets].asnumpy()
         predictions = predictions.reshape(*feature_importance.shape)
+        if Decimal(str(predictions.max())) == Decimal(str(predictions.min())):
+            log.warning("The perturbations do not affect the predictions. The correlation will be set to zero.")
+            correlation = 0
+            return np.array([correlation], np.float)
         faithfulness = -np.corrcoef(feature_importance, predictions)
         faithfulness = np.diag(faithfulness[:batch_size, batch_size:])
         return faithfulness
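The old np.count_nonzero check only caught the all-zero map; a uniform non-zero map still reached np.corrcoef and produced NaN. The new guard compares the array's extremes through Decimal, so any constant map is detected by exact comparison of the printed float values. A minimal sketch of the check (is_uniform is an illustrative helper, not part of the codebase):

import numpy as np
from decimal import Decimal

def is_uniform(arr):
    # An array whose max equals its min is constant, so its correlation
    # with the predictions is undefined (zero variance).
    return Decimal(str(arr.max())) == Decimal(str(arr.min()))

print(is_uniform(np.zeros((2, 2))))        # True: the old all-zero case
print(is_uniform(np.full((2, 2), 0.5)))    # True: uniform but non-zero, newly caught
print(is_uniform(np.array([[0.1, 0.2]])))  # False: varying map, correlation is computed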


@@ -203,8 +203,8 @@ class LabelSensitiveMetric(AttributionMetric):
         output = explainer.model(inputs)
         check_value_type("output of explainer model", output, Tensor)
         output_dim = explainer.model(inputs).shape[1]
-        if output_dim > self._num_labels:
-            raise ValueError("The output dimension of of black-box model in explainer should not exceed the dimension "
-                             "of num_labels set in the __init__, please set num_labels larger.")
+        if output_dim != self._num_labels:
+            raise ValueError("The output dimension of the black-box model in explainer does not match num_labels "
+                             "set in __init__, please check the explainer model and num_labels again.")
         verify_targets(targets, self._num_labels)
         check_value_type('saliency', saliency, (Tensor, type(None)))
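Why '>' became '!=': a model with fewer output classes than num_labels passed the old check, but the benchmark later failed with an out-of-bounds error when a target class beyond the model's output dimension was indexed. An illustrative sketch (shapes and values are hypothetical):

import numpy as np

num_labels = 10                  # benchmark configuration
output = np.random.rand(2, 5)    # hypothetical model output: batch of 2, 5 classes
output_dim = output.shape[1]

print(output_dim > num_labels)   # False: the old check let the mismatch through
print(output_dim != num_labels)  # True: the new check rejects it up front
# Under the old check, output[:, 7] for target class 7 would raise an
# IndexError deep inside the benchmark instead of a clear ValueError.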