fixbug: NaiveFaithfulness NaN warning and system errors when benchmark num_labels and model output dimension do not match.
This commit is contained in:
parent
81ff2d556a
commit
f6134bd1da
|
@ -13,6 +13,7 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
"""Faithfulness."""
|
"""Faithfulness."""
|
||||||
|
from decimal import Decimal
|
||||||
from typing import Callable, Optional, Union
|
from typing import Callable, Optional, Union
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
@ -147,8 +148,8 @@ class NaiveFaithfulness(_FaithfulnessHelper):
|
||||||
- faithfulness (np.ndarray): faithfulness score
|
- faithfulness (np.ndarray): faithfulness score
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not np.count_nonzero(saliency):
|
if Decimal(str(saliency.max())) == Decimal(str(saliency.min())):
|
||||||
log.warning("The saliency map is zero everywhere. The correlation will be set to zero.")
|
log.warning("The saliency map is uniform everywhere. The correlation will be set to zero.")
|
||||||
correlation = 0
|
correlation = 0
|
||||||
return np.array([correlation], np.float)
|
return np.array([correlation], np.float)
|
||||||
|
|
||||||
|
@ -163,6 +164,11 @@ class NaiveFaithfulness(_FaithfulnessHelper):
|
||||||
predictions = model(perturbations)[:, targets].asnumpy()
|
predictions = model(perturbations)[:, targets].asnumpy()
|
||||||
predictions = predictions.reshape(*feature_importance.shape)
|
predictions = predictions.reshape(*feature_importance.shape)
|
||||||
|
|
||||||
|
if Decimal(str(predictions.max())) == Decimal(str(predictions.min())):
|
||||||
|
log.warning("The perturbations do not affect the predictions. The correlation will be set to zero.")
|
||||||
|
correlation = 0
|
||||||
|
return np.array([correlation], np.float)
|
||||||
|
|
||||||
faithfulness = -np.corrcoef(feature_importance, predictions)
|
faithfulness = -np.corrcoef(feature_importance, predictions)
|
||||||
faithfulness = np.diag(faithfulness[:batch_size, batch_size:])
|
faithfulness = np.diag(faithfulness[:batch_size, batch_size:])
|
||||||
return faithfulness
|
return faithfulness
|
||||||
|
|
|
@ -203,8 +203,8 @@ class LabelSensitiveMetric(AttributionMetric):
|
||||||
output = explainer.model(inputs)
|
output = explainer.model(inputs)
|
||||||
check_value_type("output of explainer model", output, Tensor)
|
check_value_type("output of explainer model", output, Tensor)
|
||||||
output_dim = explainer.model(inputs).shape[1]
|
output_dim = explainer.model(inputs).shape[1]
|
||||||
if output_dim > self._num_labels:
|
if output_dim != self._num_labels:
|
||||||
raise ValueError("The output dimension of of black-box model in explainer should not exceed the dimension "
|
raise ValueError("The output dimension of the black-box model in explainer does not match the dimension "
|
||||||
"of num_labels set in the __init__, please set num_labels larger.")
|
"of num_labels set in the __init__, please check explainer and num_labels again.")
|
||||||
verify_targets(targets, self._num_labels)
|
verify_targets(targets, self._num_labels)
|
||||||
check_value_type('saliency', saliency, (Tensor, type(None)))
|
check_value_type('saliency', saliency, (Tensor, type(None)))
|
||||||
|
|
Loading…
Reference in New Issue