diff --git a/innvestigate/analyzer/__init__.py b/innvestigate/analyzer/__init__.py
index d3689bf3..830ca8b6
--- a/innvestigate/analyzer/__init__.py
+++ b/innvestigate/analyzer/__init__.py
@@ -10,12 +10,12 @@
 from .base import NotAnalyzeableModelException
 from .base import ReverseAnalyzerBase
 # from .deeplift import DeepLIFTWrapper
-#from .gradient_based import Gradient
-#from .gradient_based import InputTimesGradient
-#from .gradient_based import GuidedBackprop
-#from .gradient_based import Deconvnet
-#from .gradient_based import IntegratedGradients
-#from .gradient_based import SmoothGrad
+from .gradient_based import Gradient
+from .gradient_based import InputTimesGradient
+from .gradient_based import GuidedBackprop
+from .gradient_based import Deconvnet
+# from .gradient_based import IntegratedGradients
+# from .gradient_based import SmoothGrad
 # from .misc import Input
 # from .misc import Random
 # from .pattern_based import PatternNet
@@ -44,8 +44,8 @@
 from .relevance_based.relevance_analyzer import LRPSequentialCompositeAFlat
 from .relevance_based.relevance_analyzer import LRPSequentialCompositeBFlat
 from .relevance_based.relevance_analyzer import LRPRuleUntilIndex
-# from .deeptaylor import DeepTaylor
-# from .deeptaylor import BoundedDeepTaylor
+from .deeptaylor import DeepTaylor
+from .deeptaylor import BoundedDeepTaylor
 from .wrapper import WrapperBase
 from .wrapper import AugmentReduceBase
 from .wrapper import GaussianSmoother
@@ -108,8 +108,8 @@
     "lrp.rule_until_index": LRPRuleUntilIndex,
 
     # Deep Taylor
-    #"deep_taylor": DeepTaylor,
-    #"deep_taylor.bounded": BoundedDeepTaylor,
+    "deep_taylor": DeepTaylor,
+    "deep_taylor.bounded": BoundedDeepTaylor,
 
     # # DeepLIFT
     # "deep_lift.wrapper": DeepLIFTWrapper,
diff --git a/innvestigate/analyzer/base.py b/innvestigate/analyzer/base.py
index 9d147357..f18154ef
--- a/innvestigate/analyzer/base.py
+++ b/innvestigate/analyzer/base.py
@@ -349,6 +349,9 @@ def analyze(self, X, neuron_selection="max_activation", explained_layer_names=No
         self._analyzed = True
 
         ret = self._postprocess_analysis(ret)
+        if isinstance(ret, list) and len(ret) == 1:
+            ret = ret[0]
+
         return ret
 
     def _postprocess_analysis(self, hm):
diff --git a/innvestigate/analyzer/deeptaylor.py b/innvestigate/analyzer/deeptaylor.py
index 843163ea..9835f1f7
--- a/innvestigate/analyzer/deeptaylor.py
+++ b/innvestigate/analyzer/deeptaylor.py
@@ -13,7 +13,7 @@
 from tensorflow.python.keras.engine.input_layer import InputLayer
 
 from . import base
-from .relevance_based import relevance_rule as lrp_rules
+from .relevance_based import relevance_rule_base as lrp_rules
 from ..utils.keras import checks as kchecks
 from ..utils.keras import graph as kgraph
 
@@ -71,35 +71,40 @@ def do_nothing(Xs, Ys, As, reverse_state):
         self._add_conditional_reverse_mapping(
             lambda l: (not kchecks.contains_kernel(l) and kchecks.contains_activation(l)),
-            self._gradient_reverse_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_relu",
         )
 
         # Assume conv layer beforehand -> unbounded
-        bn_mapping = kgraph.apply_mapping_to_fused_bn_layer(
-            lrp_rules.WSquareRule,
-            fuse_mode="one_linear",
-        )
+        # bn_mapping = kgraph.apply_mapping_to_fused_bn_layer(
+        #     lrp_rules.WSquareRule,
+        #     fuse_mode="one_linear",
+        # )
+        # self._add_conditional_reverse_mapping(
+        #     kchecks.is_batch_normalization_layer,
+        #     bn_mapping,
+        #     name="deep_taylor_batch_norm",
+        # )
         self._add_conditional_reverse_mapping(
             kchecks.is_batch_normalization_layer,
-            bn_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_batch_norm",
         )
 
         # Special layers.
         self._add_conditional_reverse_mapping(
             kchecks.is_max_pooling,
-            self._gradient_reverse_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_max_pooling",
         )
         self._add_conditional_reverse_mapping(
             kchecks.is_average_pooling,
-            self._gradient_reverse_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_average_pooling",
         )
         self._add_conditional_reverse_mapping(
             lambda l: isinstance(l, keras_layers.Add),
             # Ignore scaling with 0.5
-            self._gradient_reverse_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_add",
         )
         self._add_conditional_reverse_mapping(
@@ -112,7 +117,7 @@
                 keras_layers.SpatialDropout2D,
                 keras_layers.SpatialDropout3D,
             )),
-            self._gradient_reverse_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_special_layers",
         )
 
@@ -133,19 +138,19 @@
                 keras_layers.RepeatVector,
                 keras_layers.Reshape,
             )),
-            self._gradient_reverse_mapping,
+            self._gradient_reverse_mapping(),
             name="deep_taylor_no_transform",
         )
 
         return super(DeepTaylor, self)._create_analysis(
             *args, **kwargs)
 
-    def _default_reverse_mapping(self, Xs, Ys, reversed_Ys, reverse_state):
+    def _default_reverse_mapping(self, layer):
         """
         Block all default mappings.
         """
         raise NotImplementedError(
-            "Layer %s not supported." % reverse_state["layer"])
+            "Layer %s not supported." % layer)
 
     def _prepare_model(self, model):
         """
diff --git a/innvestigate/utils/keras/functional.py b/innvestigate/utils/keras/functional.py
index b905d122..b4f6bb11
--- a/innvestigate/utils/keras/functional.py
+++ b/innvestigate/utils/keras/functional.py
@@ -615,8 +615,8 @@ def boundedrule_explanation(ins, layer_func, layer_func_pos, layer_func_neg, out
     #print("TRACING bound")
     to_low = keras_layers.Lambda(lambda x: x * 0 + low_param)
     to_high = keras_layers.Lambda(lambda x: x * 0 + high_param)
-    low = [to_low(x) for x in ins]
-    high = [to_high(x) for x in ins]
+    low = tf.map_fn(to_low, ins)
+    high = tf.map_fn(to_high, ins)
 
     A = out_func(ins, layer_func)
     B = out_func(low, layer_func_pos)
diff --git a/setup.py b/setup.py
index c07581f0..c3062f4a
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
     "numpy",
     "pillow",
     "scipy",
-    "tensorflow==2.1",
+    "tensorflow>=2.3",
 ]
 
 setup_requirements = [
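
Smoke test (hypothetical, not part of the diff): a minimal sketch exercising the analyzers this patch re-enables. It assumes upstream iNNvestigate's public `innvestigate.create_analyzer` entry point and its registry keys ("gradient", "input_t_gradient", "guided_backprop", "deconvnet"; only "deep_taylor" is visible in the diff above); the toy model and random input are placeholders.

# Hypothetical smoke test; registry keys other than "deep_taylor" are
# taken from upstream iNNvestigate, not shown in this diff.
import numpy as np
import tensorflow as tf

import innvestigate

# Toy single-input model; Deep Taylor expects the final softmax to be absent.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10),
])
x = np.random.rand(2, 28, 28, 1).astype(np.float32)

for name in ["gradient", "input_t_gradient", "guided_backprop",
             "deconvnet", "deep_taylor"]:
    analyzer = innvestigate.create_analyzer(name, model)
    # With the base.py change above, a single-input model gets one array
    # back from analyze() instead of a one-element list.
    heatmap = analyzer.analyze(x, neuron_selection="max_activation")
    print(name, heatmap.shape)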