Revert change of moving kpi_target from facades, and set default MP config in CoreConfig #996

Merged · 3 commits · Mar 17, 2024
```diff
@@ -47,13 +47,15 @@ def compute_kpi_data(in_model: Any,

     """

+    # We assume that the kpi_data API is used to compute the model KPI for mixed precision scenario,
+    # so we run graph preparation under the assumption of enabled mixed precision.
     transformed_graph = graph_preparation_runner(in_model,
                                                  representative_data_gen,
                                                  core_config.quantization_config,
                                                  fw_info,
                                                  fw_impl,
                                                  tpc,
-                                                 mixed_precision_enable=core_config.mixed_precision_enable)
+                                                 mixed_precision_enable=True)

     # Compute parameters sum
     weights_params = compute_nodes_weights_params(graph=transformed_graph, fw_info=fw_info)
```
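With this change, `compute_kpi_data` always prepares the graph as if mixed precision were enabled, regardless of what the caller's config says. A minimal sketch of the KPI-data flow this serves (not part of the diff; the `mct.core.keras_kpi_data` and `mct.core.KPI` names are assumptions about the public API of this era):

```python
# Hypothetical illustration of the kpi_data flow; API names are assumptions, not from this diff.
import numpy as np
import model_compression_toolkit as mct
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2

model = MobileNetV2()

def repr_datagen():
    yield [np.random.rand(1, 224, 224, 3)]  # one representative batch

# The graph behind this call is now always prepared with mixed_precision_enable=True.
kpi_data = mct.core.keras_kpi_data(model, repr_datagen)

# Derive a target from the measured maximum, e.g. 75% of the 8-bit weights memory.
target_kpi = mct.core.KPI(weights_memory=kpi_data.weights_memory * 0.75)
```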
```diff
@@ -16,13 +16,11 @@
 from typing import List, Callable

 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
-from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI


 class MixedPrecisionQuantizationConfig:

     def __init__(self,
-                 target_kpi: KPI = None,
                  compute_distance_fn: Callable = None,
                  distance_weighting_method: MpDistanceWeighting = MpDistanceWeighting.AVG,
                  num_of_images: int = 32,
@@ -36,7 +34,6 @@ def __init__(self,
         Class with mixed precision parameters to quantize the input model.

         Args:
-            target_kpi (KPI): KPI to constraint the search of the mixed-precision configuration for the model.
             compute_distance_fn (Callable): Function to compute a distance between two tensors. If None, using pre-defined distance methods based on the layer type for each layer.
             distance_weighting_method (MpDistanceWeighting): MpDistanceWeighting enum value that provides a function to use when weighting the distances among different layers when computing the sensitivity metric.
             num_of_images (int): Number of images to use to evaluate the sensitivity of a mixed-precision model comparing to the float model.
@@ -49,7 +46,6 @@ def __init__(self,

         """

-        self.target_kpi = target_kpi
         self.compute_distance_fn = compute_distance_fn
         self.distance_weighting_method = distance_weighting_method
         self.num_of_images = num_of_images
@@ -67,13 +63,21 @@ def __init__(self,

         self.metric_normalization_threshold = metric_normalization_threshold

-    def set_target_kpi(self, target_kpi: KPI):
+        self._mixed_precision_enable = False
+
+    def set_mixed_precision_enable(self):
         """
-        Setting target KPI in mixed precision config.
-
-        Args:
-            target_kpi: A target KPI to set.
-
+        Set a flag in mixed precision config indicating that mixed precision is enabled.
         """
-        self.target_kpi = target_kpi
+        self._mixed_precision_enable = True
+
+    @property
+    def mixed_precision_enable(self):
+        """
+        A property that indicates whether mixed precision quantization is enabled.
+
+        Returns: True if mixed precision quantization is enabled
+
+        """
+        return self._mixed_precision_enable
```
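Net effect of this file: `target_kpi` leaves the config, which instead carries a private enable flag behind a read-only property. A short sketch of the resulting semantics (assuming the class is exported as `mct.core.MixedPrecisionQuantizationConfig`):

```python
import model_compression_toolkit as mct

mp_config = mct.core.MixedPrecisionQuantizationConfig(num_of_images=16)
assert mp_config.mixed_precision_enable is False  # starts disabled

# core_runner flips the flag when a target KPI is supplied to a facade.
mp_config.set_mixed_precision_enable()
assert mp_config.mixed_precision_enable is True
```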
```diff
@@ -47,6 +47,7 @@ class BitWidthSearchMethod(Enum):
 def search_bit_width(graph_to_search_cfg: Graph,
                      fw_info: FrameworkInfo,
                      fw_impl: FrameworkImplementation,
+                     target_kpi: KPI,
                      mp_config: MixedPrecisionQuantizationConfig,
                      representative_data_gen: Callable,
                      search_method: BitWidthSearchMethod = BitWidthSearchMethod.INTEGER_PROGRAMMING,
@@ -63,6 +64,7 @@ def search_bit_width(graph_to_search_cfg: Graph,
         graph_to_search_cfg: Graph to search a MP configuration for.
         fw_info: FrameworkInfo object about the specific framework (e.g., attributes of different layers' weights to quantize).
         fw_impl: FrameworkImplementation object with specific framework methods implementation.
+        target_kpi: Target KPI to bound our feasible solution space s.t the configuration does not violate it.
         mp_config: Mixed-precision quantization configuration.
         representative_data_gen: Dataset to use for retrieving images for the models inputs.
         search_method: BitWidthSearchMethod to define which searching method to use.
@@ -74,7 +76,6 @@ def search_bit_width(graph_to_search_cfg: Graph,
         bit-width index on the node).

     """
-    target_kpi = mp_config.target_kpi

     # target_kpi have to be passed. If it was not passed, the facade is not supposed to get here by now.
     if target_kpi is None:
```
```diff
@@ -30,14 +30,19 @@ def __init__(self,

         Args:
             quantization_config (QuantizationConfig): Config for quantization.
-            mixed_precision_config (MixedPrecisionQuantizationConfig): Config for mixed precision quantization (optional, default=None).
+            mixed_precision_config (MixedPrecisionQuantizationConfig): Config for mixed precision quantization.
+                If None, a default MixedPrecisionQuantizationConfig is used.
             debug_config (DebugConfig): Config for debugging and editing the network quantization process.
         """
         self.quantization_config = quantization_config
-        self.mixed_precision_config = mixed_precision_config
         self.debug_config = debug_config

+        if mixed_precision_config is None:
+            self.mixed_precision_config = MixedPrecisionQuantizationConfig()
+        else:
+            self.mixed_precision_config = mixed_precision_config
+
     @property
     def mixed_precision_enable(self):
-        return self.mixed_precision_config is not None
+        return self.mixed_precision_config is not None and self.mixed_precision_config.mixed_precision_enable
```
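With the default in place, `CoreConfig()` always holds a usable MP config, and `mixed_precision_enable` now reflects the flag rather than the mere presence of a config. A sketch of the new behavior (same `mct.core` export assumption as above):

```python
import model_compression_toolkit as mct

core_config = mct.core.CoreConfig()                     # no explicit MP config passed
assert core_config.mixed_precision_config is not None   # a default one was created
assert core_config.mixed_precision_enable is False      # still off until explicitly enabled
```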

```diff
@@ -71,7 +71,7 @@ def set_quantization_configs_to_node(node: BaseNode,
         quant_config: Quantization configuration to generate the node's configurations from.
         fw_info: Information needed for quantization about the specific framework.
         tpc: TargetPlatformCapabilities to get default OpQuantizationConfig.
-        mixed_precision_enable: is mixed precision enabled
+        mixed_precision_enable: is mixed precision enabled.
     """
     node_qc_options = node.get_qco(tpc)
```
5 changes: 3 additions & 2 deletions model_compression_toolkit/core/graph_prep_runner.py

```diff
@@ -57,7 +57,8 @@ def graph_preparation_runner(in_model: Any,
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
         tpc: TargetPlatformCapabilities object that models the inference target platform and
             the attached framework operator's information.
-        tb_w: TensorboardWriter object for logging
+        tb_w: TensorboardWriter object for logging.
+        mixed_precision_enable: is mixed precision enabled.

     Returns:
         An internal graph representation of the input model.
@@ -103,7 +104,7 @@ def get_finalized_graph(initial_graph: Graph,
             kernel channels indices, groups of layers by how they should be quantized, etc.)
         tb_w (TensorboardWriter): TensorboardWriter object to use for logging events such as graphs, histograms, etc.
         fw_impl (FrameworkImplementation): FrameworkImplementation object with a specific framework methods implementation.
-        mixed_precision_enable: is mixed precision enabled.
+        mixed_precision_enable: is mixed precision enabled.

     Returns: Graph object that represents the model, after applying all required modifications to it.
     """
```
2 changes: 1 addition & 1 deletion model_compression_toolkit/core/pytorch/kpi_data_facade.py

```diff
@@ -38,7 +38,7 @@

 def pytorch_kpi_data(in_model: Module,
                      representative_data_gen: Callable,
-                     core_config: CoreConfig = CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfig()),
+                     core_config: CoreConfig = CoreConfig(),
                      target_platform_capabilities: TargetPlatformCapabilities = PYTORCH_DEFAULT_TPC) -> KPI:
     """
     Computes KPI data that can be used to calculate the desired target KPI for mixed-precision quantization.
```
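The simpler default is possible because `CoreConfig` now builds its own `MixedPrecisionQuantizationConfig`. A usage sketch for this facade (hypothetical example; the torchvision model and `mct.core.pytorch_kpi_data` export are assumptions):

```python
import numpy as np
import model_compression_toolkit as mct
from torchvision.models import mobilenet_v2

def repr_datagen():
    yield [np.random.rand(1, 3, 224, 224).astype(np.float32)]

# No CoreConfig needs to be constructed by the caller anymore.
kpi_data = mct.core.pytorch_kpi_data(mobilenet_v2(), repr_datagen)
print(kpi_data.weights_memory, kpi_data.activation_memory)  # measured maxima, per the KPI fields
```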
12 changes: 10 additions & 2 deletions model_compression_toolkit/core/runner.py

```diff
@@ -47,6 +47,7 @@ def core_runner(in_model: Any,
                 fw_info: FrameworkInfo,
                 fw_impl: FrameworkImplementation,
                 tpc: TargetPlatformCapabilities,
+                target_kpi: KPI = None,
                 tb_w: TensorboardWriter = None):
     """
     Quantize a trained model using post-training quantization.
@@ -66,6 +67,7 @@ def core_runner(in_model: Any,
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
         tpc: TargetPlatformCapabilities object that models the inference target platform and
             the attached framework operator's information.
+        target_kpi: KPI to constraint the search of the mixed-precision configuration for the model.
         tb_w: TensorboardWriter object for logging

     Returns:
@@ -81,6 +83,13 @@ def core_runner(in_model: Any,
         Logger.warning('representative_data_gen generates a batch size of 1 which can be slow for optimization:'
                        ' consider increasing the batch size')

+    # Checking whether to run mixed precision quantization
+    if target_kpi is not None:
+        if core_config.mixed_precision_config is None:
+            Logger.critical("Provided an initialized target_kpi, that means that mixed precision quantization is "
+                            "enabled, but the provided MixedPrecisionQuantizationConfig is None.")
+        core_config.mixed_precision_config.set_mixed_precision_enable()
+
     graph = graph_preparation_runner(in_model,
                                      representative_data_gen,
                                      core_config.quantization_config,
@@ -105,13 +114,12 @@ def core_runner(in_model: Any,
     ######################################
     # Finalize bit widths
     ######################################
     if core_config.mixed_precision_enable:
-        if core_config.mixed_precision_config.target_kpi is None:
-            Logger.critical(f"Trying to run Mixed Precision quantization without providing a valid target KPI.")
         if core_config.mixed_precision_config.configuration_overwrite is None:

             bit_widths_config = search_bit_width(tg,
                                                  fw_info,
                                                  fw_impl,
+                                                 target_kpi,
                                                  core_config.mixed_precision_config,
                                                  representative_data_gen,
                                                  hessian_info_service=hessian_info_service)
```
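In short, `target_kpi` is now the single switch for mixed precision: passing it arms the config's flag, and the old in-config KPI check disappears. A standalone mirror of the gate (hypothetical helper for illustration, not code from the repo):

```python
def resolve_mixed_precision(core_config, target_kpi) -> bool:
    """Mirror of core_runner's new gating: a target KPI implies mixed precision."""
    if target_kpi is not None:
        # The config must exist to carry the flag; in the real runner Logger.critical aborts here.
        if core_config.mixed_precision_config is None:
            raise ValueError("target_kpi given but mixed_precision_config is None")
        core_config.mixed_precision_config.set_mixed_precision_enable()
    return core_config.mixed_precision_enable
```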
16 changes: 9 additions & 7 deletions model_compression_toolkit/gptq/keras/quantization_facade.py

```diff
@@ -116,6 +116,7 @@ def get_keras_gptq_config(n_epochs: int,
 def keras_gradient_post_training_quantization(in_model: Model, representative_data_gen: Callable,
                                               gptq_config: GradientPTQConfig,
                                               gptq_representative_data_gen: Callable = None,
+                                              target_kpi: KPI = None,
                                               core_config: CoreConfig = CoreConfig(),
                                               target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> Tuple[Model, UserInformation]:
     """
@@ -139,6 +140,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_data_gen: Callable,
         representative_data_gen (Callable): Dataset used for calibration.
         gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
         gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
+        target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
         core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
         target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
@@ -166,26 +168,26 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_data_gen: Callable,

     >>> config = mct.core.CoreConfig()

+    If mixed precision is desired, create an MCT core config with a mixed-precision configuration, to quantize a model
+    with different bitwidths for different layers.
+    The candidates bitwidth for quantization should be defined in the target platform model:
+
+    >>> config = mct.core.CoreConfig(mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=1))
+
     For mixed-precision set a target KPI object:
     Create a KPI object to limit our returned model's size. Note that this value affects only coefficients
     that should be quantized (for example, the kernel of Conv2D in Keras will be affected by this value,
     while the bias will not):

     >>> kpi = mct.core.KPI(model.count_params() * 0.75)  # About 0.75 of the model size when quantized with 8 bits.

-    If mixed precision is desired, create an MCT core config with a mixed-precision configuration, to quantize a model
-    with different bitwidths for different layers.
-    The candidates bitwidth for quantization should be defined in the target platform model:
-
-    >>> config = mct.core.CoreConfig(mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=1, target_kpi=kpi))
-
     Create GPTQ config:

     >>> gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=1)

     Pass the model with the representative dataset generator to get a quantized model:

-    >>> quantized_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(model, repr_datagen, gptq_config, core_config=config)
+    >>> quantized_model, quantization_info = mct.gptq.keras_gradient_post_training_quantization(model, repr_datagen, gptq_config, target_kpi=kpi, core_config=config)

     """
     KerasModelValidation(model=in_model,
```
3 changes: 3 additions & 0 deletions model_compression_toolkit/gptq/pytorch/quantization_facade.py

```diff
@@ -94,6 +94,7 @@ def get_pytorch_gptq_config(n_epochs: int,

 def pytorch_gradient_post_training_quantization(model: Module,
                                                 representative_data_gen: Callable,
+                                                target_kpi: KPI = None,
                                                 core_config: CoreConfig = CoreConfig(),
                                                 gptq_config: GradientPTQConfig = None,
                                                 gptq_representative_data_gen: Callable = None,
@@ -117,6 +118,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
     Args:
         model (Module): Pytorch model to quantize.
         representative_data_gen (Callable): Dataset used for calibration.
+        target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
         core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
         gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
         gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
@@ -174,6 +176,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
                                                     fw_info=DEFAULT_PYTORCH_INFO,
                                                     fw_impl=fw_impl,
                                                     tpc=target_platform_capabilities,
+                                                    target_kpi=target_kpi,
                                                     tb_w=tb_w)

     # ---------------------- #
```
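End to end, the reverted signature looks like this on the PyTorch side (usage sketch; `mct.gptq.get_pytorch_gptq_config`, `mct.core.pytorch_kpi_data`, and the torchvision model are assumptions, not from this diff):

```python
import numpy as np
import model_compression_toolkit as mct
from torchvision.models import mobilenet_v2

model = mobilenet_v2()

def repr_datagen():
    yield [np.random.rand(1, 3, 224, 224).astype(np.float32)]

kpi_data = mct.core.pytorch_kpi_data(model, repr_datagen)
target_kpi = mct.core.KPI(weights_memory=kpi_data.weights_memory * 0.75)
gptq_config = mct.gptq.get_pytorch_gptq_config(n_epochs=1)

# target_kpi rides on the facade again instead of on MixedPrecisionQuantizationConfig.
quantized_model, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization(
    model, repr_datagen, target_kpi=target_kpi, gptq_config=gptq_config)
```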
3 changes: 3 additions & 0 deletions model_compression_toolkit/ptq/keras/quantization_facade.py

```diff
@@ -42,6 +42,7 @@

 def keras_post_training_quantization(in_model: Model,
                                      representative_data_gen: Callable,
+                                     target_kpi: KPI = None,
                                      core_config: CoreConfig = CoreConfig(),
                                      target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC):
     """
@@ -60,6 +61,7 @@ def keras_post_training_quantization(in_model: Model,
     Args:
         in_model (Model): Keras model to quantize.
         representative_data_gen (Callable): Dataset used for calibration.
+        target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
         core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
         target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
@@ -135,6 +137,7 @@ def keras_post_training_quantization(in_model: Model,
                                      fw_info=fw_info,
                                      fw_impl=fw_impl,
                                      tpc=target_platform_capabilities,
+                                     target_kpi=target_kpi,
                                      tb_w=tb_w)

     tg = ptq_runner(tg, representative_data_gen, core_config, fw_info, fw_impl, tb_w)
```
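The plain PTQ path mirrors the GPTQ one: pass `target_kpi` to opt into mixed precision, omit it for single-precision PTQ. A Keras usage sketch (assuming the facade is exported under `mct.ptq`; the 0.75 factor follows the docstring example above):

```python
import numpy as np
import model_compression_toolkit as mct
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2

model = MobileNetV2()

def repr_datagen():
    yield [np.random.rand(1, 224, 224, 3)]

# ~75% of the model's 8-bit weights size.
target_kpi = mct.core.KPI(model.count_params() * 0.75)

# Passing target_kpi switches the runner into mixed-precision mode under the hood.
quantized_model, quantization_info = mct.ptq.keras_post_training_quantization(
    model, repr_datagen, target_kpi=target_kpi)
```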
3 changes: 3 additions & 0 deletions model_compression_toolkit/ptq/pytorch/quantization_facade.py

```diff
@@ -41,6 +41,7 @@

 def pytorch_post_training_quantization(in_module: Module,
                                        representative_data_gen: Callable,
+                                       target_kpi: KPI = None,
                                        core_config: CoreConfig = CoreConfig(),
                                        target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
     """
@@ -59,6 +60,7 @@ def pytorch_post_training_quantization(in_module: Module,
     Args:
         in_module (Module): Pytorch module to quantize.
         representative_data_gen (Callable): Dataset used for calibration.
+        target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
         core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
         target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
@@ -107,6 +109,7 @@ def pytorch_post_training_quantization(in_module: Module,
                                        fw_info=DEFAULT_PYTORCH_INFO,
                                        fw_impl=fw_impl,
                                        tpc=target_platform_capabilities,
+                                       target_kpi=target_kpi,
                                        tb_w=tb_w)

     tg = ptq_runner(tg, representative_data_gen, core_config, DEFAULT_PYTORCH_INFO, fw_impl, tb_w)
```