diff --git a/coremltools/optimize/coreml/experimental/_model_debugger.py b/coremltools/optimize/coreml/experimental/_model_debugger.py index d2733f597..667175aa8 100644 --- a/coremltools/optimize/coreml/experimental/_model_debugger.py +++ b/coremltools/optimize/coreml/experimental/_model_debugger.py @@ -325,5 +325,5 @@ def step( key: value for key, value in outputs.items() if key not in model_output_names } - for output_name, output_value in outputs.items(): + for output_name, output_value in intermediate_outputs.items(): self.record_intermediate_output(output_value, output_name, activation_stats_dict) diff --git a/coremltools/test/optimize/coreml/test_passes.py b/coremltools/test/optimize/coreml/test_passes.py index 6b409db22..e6b8e0f76 100644 --- a/coremltools/test/optimize/coreml/test_passes.py +++ b/coremltools/test/optimize/coreml/test_passes.py @@ -3830,3 +3830,24 @@ def test_get_activation_calibration_stats_concat_surrounding_ops(self): # Since mlmodel has a concat with 2 inputs and 1 output, we should see at least 3 rmin/rmax pairs are identical in activation_stats. # If we dedup rmin/rmax pairs with identical values, the length of unique values should at least reduced by 2 compared with original one. assert len(activation_stats) - len(activation_stats_unique) >= 2 + + def test_get_activation_calibration_stats_excludes_model_outputs(self): + """ + The activation calibration stats shouldn't include the model's final outputs.
+ """ + # Prepare sample data + sample_data = [] + for _ in range(3): + input_data = np.random.rand(5, 10, 4, 4) + sample_data.append({"data": input_data}) + + # Loading a floating point mlmodel + mlmodel = self._get_test_mlmodel_conv_relu() + + activation_stats = _get_activation_calibration_stats(mlmodel, sample_data) + + model_spec = mlmodel.get_spec() + output_count = len(mlmodel.get_spec().description.output) + for i in range(0, output_count): + output_name = model_spec.description.output[i].name + assert output_name not in activation_stats \ No newline at end of file