From 4c9ff6168bc5a8fae0c9dbb19c4701b756854cf4 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Wed, 20 Mar 2024 10:59:13 +0800 Subject: [PATCH 01/25] Enable Uniform QDQ for Keras Models Signed-off-by: zehao-intel --- neural_compressor/adaptor/keras.py | 112 ++++++++++++++++- .../adaptor/keras_utils/conv2d.py | 118 +++++++++++++++--- .../adaptor/keras_utils/dense.py | 42 ++++--- .../adaptor/keras_utils/pool2d.py | 16 +-- .../adaptor/keras_utils/quantizer.py | 90 +++++++++++++ 5 files changed, 335 insertions(+), 43 deletions(-) diff --git a/neural_compressor/adaptor/keras.py b/neural_compressor/adaptor/keras.py index e6fbfb963c5..2f3a4aec0be 100644 --- a/neural_compressor/adaptor/keras.py +++ b/neural_compressor/adaptor/keras.py @@ -51,11 +51,13 @@ def _add_supported_quantized_objects(custom_objects): from neural_compressor.adaptor.keras_utils.dense import QDense from neural_compressor.adaptor.keras_utils.depthwise_conv2d import QDepthwiseConv2D from neural_compressor.adaptor.keras_utils.pool2d import QAvgPool2D, QMaxPool2D - from neural_compressor.adaptor.keras_utils.quantizer import DeQuantize, FakeQuant, Quantize + from neural_compressor.adaptor.keras_utils.quantizer import DeQuantize, FakeQuant, Quantize, UniformQuantize, UniformDeQuantize from neural_compressor.adaptor.keras_utils.separable_conv2d import QSeparableConv2D custom_objects["Quantize"] = Quantize custom_objects["DeQuantize"] = DeQuantize + custom_objects["UniformQuantize"] = UniformQuantize + custom_objects["UniformDeQuantize"] = UniformDeQuantize custom_objects["FakeQuant"] = FakeQuant custom_objects["QConv2D"] = QConv2D custom_objects["QDepthwiseConv2D"] = QDepthwiseConv2D @@ -491,6 +493,114 @@ def _calibrate(self, model, dataloader, calib_interation): quantized_model = self._restore_model_from_json(json_model) return quantized_model + def _calibrate_with_uniform_qdq(self, model, dataloader, calib_interation): + # run eagerly to fetch the numpy min/max + model.compile(run_eagerly=True) + results = {} + for idx, (inputs, labels) in enumerate(dataloader): + outputs = model.predict_on_batch(inputs) + json_model = copy.deepcopy(json.loads(model.to_json())) + config = json_model["config"] + layers = config["layers"] + for layer in layers: + if layer["class_name"] == "FakeQuant": + min_value = layer["config"]["min_value"] + max_value = layer["config"]["max_value"] + if layer["config"]["name"] not in results: + results[layer["config"]["name"]] = {"min": [min_value], "max": [max_value]} + else: + results[layer["config"]["name"]]["min"].append(min_value) + results[layer["config"]["name"]]["max"].append(max_value) + if idx + 1 == calib_interation: + break + + # insert the calibrated min/max to Q/DQ + json_model = copy.deepcopy(json.loads(model.to_json())) + config = json_model["config"] + layers = config["layers"] + q_layers = [] + # quantize_mode = self._check_quantize_mode(json_model) + inbound_reverse_map = {} + for idx, layer in enumerate(layers): + layer_config = copy.deepcopy(layer["config"]) + if layer["class_name"] == "FakeQuant": + min_value = min(results[layer["config"]["name"]]["min"]) + max_value = max(results[layer["config"]["name"]]["max"]) + T = layer_config["T"] + zero_points = 0 if T == "s8" else 128 + ranges = 127 if T == "s8" else 255 + scales = max(abs(max_value), abs(min_value))/ranges + + quantize_layer = { + "class_name": "UniformQuantize", + "name": "uniform_quantize_" + str(idx), + "config": { + "scales": scales, + "zero_points": zero_points, + "T": T, + "quantization_axis": -1, + "name": "uniform_quantize_" 
+ str(idx), + }, + } + dequantize_layer = { + "class_name": "UniformDeQuantize", + "name": "uniform_dequantize_" + str(idx), + "config": { + "scales": scales, + "zero_points": zero_points, + "T": T, + "quantization_axis": -1, + "name": "uniform_dequantize_" + str(idx), + }, + } + if "inbound_nodes" in layer: + quantize_layer["inbound_nodes"] = layer["inbound_nodes"] + dequantize_layer["inbound_nodes"] = [[["quantize_" + str(idx), 0, 0, {}]]] + # find the conv/dense layer from fake quant map and + # change the conv/dense node inbound to dequantize + layer_name = self.inbound_nodes_map[layer["name"]]["name"] + inbound_reverse_map[layer_name] = [[["dequantize_" + str(idx), 0, 0, {}]]] + + q_layers.append(quantize_layer) + q_layers.append(dequantize_layer) + elif ( + layer["class_name"] in self.supported_op + and layer["config"]["name"] in self.quantize_config["op_wise_config"] + ): + # index 0 is weight, index 1 is bias + q_layer_name = "Q" + layer["class_name"] + # this is for inbounds search + q_name = layer["config"]["name"] + # for layers that have weights + if layer["config"]["name"] in self.layer_weights: + kernel = self.layer_weights[layer["config"]["name"]][0] + dim = list(range(0, kernel.ndim)) + t_dim = [dim.pop(-1)] + t_dim.extend(dim) + channel_size = kernel.shape[-1] + kernel_channel = kernel.transpose(t_dim).reshape(channel_size, -1) + min_value = json.dumps(np.min(kernel_channel, axis=1).tolist()) + max_value = json.dumps(np.max(kernel_channel, axis=1).tolist()) + layer_config["scales"] = max(abs(max_value), abs(min_value))/127 + layer_config["zero_points"] = 0 + else: + # default value, but never expected to be used + # cause no kernel weights for this layer + layer_config["scales"] = json.dumps([78.7]) + layer_config["zero_points"] = json.dumps([0]) + + layer_config["name"] = q_name + q_layer = {"class_name": q_layer_name, "name": q_name, "config": layer_config} + if "inbound_nodes" in layer: + q_layer["inbound_nodes"] = inbound_reverse_map[layer["name"]] + q_layers.append(q_layer) + else: + q_layers.append(layer) + + json_model["config"]["layers"] = q_layers + quantized_model = self._restore_model_from_json(json_model) + return quantized_model + def convert_bf16(self): """Execute the BF16 conversion.""" tf.keras.mixed_precision.set_global_policy("mixed_bfloat16") diff --git a/neural_compressor/adaptor/keras_utils/conv2d.py b/neural_compressor/adaptor/keras_utils/conv2d.py index d1b72a196eb..c9a87c3058c 100644 --- a/neural_compressor/adaptor/keras_utils/conv2d.py +++ b/neural_compressor/adaptor/keras_utils/conv2d.py @@ -29,6 +29,86 @@ from keras.layers.convolutional.base_conv import Conv # pylint: disable=E0401 +# class QConv2D(Conv): +# def __init__( +# self, +# filters, +# kernel_size, +# strides=(1, 1), +# padding="valid", +# data_format=None, +# dilation_rate=(1, 1), +# groups=1, +# activation=None, +# use_bias=True, +# kernel_initializer="glorot_uniform", +# bias_initializer="zeros", +# kernel_regularizer=None, +# bias_regularizer=None, +# activity_regularizer=None, +# kernel_constraint=None, +# bias_constraint=None, +# min_value=-10000, +# max_value=10000, +# **kwargs +# ): +# super(QConv2D, self).__init__( +# rank=2, +# filters=filters, +# kernel_size=kernel_size, +# strides=strides, +# padding=padding, +# data_format=data_format, +# dilation_rate=dilation_rate, +# groups=groups, +# activation=activations.get(activation), +# use_bias=use_bias, +# kernel_initializer=initializers.get(kernel_initializer), +# bias_initializer=initializers.get(bias_initializer), +# 
kernel_regularizer=regularizers.get(kernel_regularizer), +# bias_regularizer=regularizers.get(bias_regularizer), +# activity_regularizer=regularizers.get(activity_regularizer), +# kernel_constraint=constraints.get(kernel_constraint), +# bias_constraint=constraints.get(bias_constraint), +# **kwargs +# ) +# self.min_value = json.loads(min_value) +# self.max_value = json.loads(max_value) + +# def call(self, inputs): +# # add the Q/DQ here +# kernel, _, _ = quantization.quantize( +# self.kernel, self.min_value, self.max_value, tf.qint8, axis=3, mode="SCALED" +# ) +# kernel = quantization.dequantize( +# kernel, +# self.min_value, +# self.max_value, +# axis=3, +# mode="SCALED", +# ) +# outputs = tf.keras.backend.conv2d( +# inputs, +# kernel, +# strides=self.strides, +# padding=self.padding, +# data_format=self.data_format, +# dilation_rate=self.dilation_rate, +# ) + +# if self.use_bias: +# outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) + +# if self.activation is not None: +# return self.activation(outputs) + +# return outputs + +# @classmethod +# def from_config(cls, config): +# return cls(**config) + + class QConv2D(Conv): def __init__( self, @@ -48,8 +128,8 @@ def __init__( activity_regularizer=None, kernel_constraint=None, bias_constraint=None, - min_value=-10000, - max_value=10000, + scales=78.7, + zero_points=0, **kwargs ): super(QConv2D, self).__init__( @@ -72,21 +152,29 @@ def __init__( bias_constraint=constraints.get(bias_constraint), **kwargs ) - self.min_value = json.loads(min_value) - self.max_value = json.loads(max_value) + self.scales = json.loads(scales) + self.zero_points = json.loads(zero_points) def call(self, inputs): # add the Q/DQ here - kernel, _, _ = quantization.quantize( - self.kernel, self.min_value, self.max_value, tf.qint8, axis=3, mode="SCALED" - ) - kernel = quantization.dequantize( - kernel, - self.min_value, - self.max_value, - axis=3, - mode="SCALED", - ) + kernel = tf.raw_ops.UniformQuantize( + input=self.kernel, + scales=self.scales, + zero_points=self.zero_points, + Tout=tf.qint8, + quantization_min_val=-127, + quantization_max_val=128, + quantization_axis=3,) + + kernel = tf.raw_ops.UniformDequantize( + input=kernel, + scales=self.scales, + zero_points=self.zero_points, + Tout=tf.float32, + quantization_min_val=-127, + quantization_max_val=128, + quantization_axis=3,) + outputs = tf.keras.backend.conv2d( inputs, kernel, @@ -106,4 +194,4 @@ def call(self, inputs): @classmethod def from_config(cls, config): - return cls(**config) + return cls(**config) \ No newline at end of file diff --git a/neural_compressor/adaptor/keras_utils/dense.py b/neural_compressor/adaptor/keras_utils/dense.py index b97e9759b70..8e2be478c6f 100644 --- a/neural_compressor/adaptor/keras_utils/dense.py +++ b/neural_compressor/adaptor/keras_utils/dense.py @@ -36,8 +36,8 @@ def __init__( activity_regularizer=None, kernel_constraint=None, bias_constraint=None, - min_value=-10000, - max_value=10000, + scales=78.7, + zero_points=0, **kwargs ): super(QDense, self).__init__( @@ -53,30 +53,34 @@ def __init__( bias_constraint=bias_constraint, **kwargs ) - self.min_value = json.loads(min_value) - self.max_value = json.loads(max_value) + self.scales = json.loads(scales) + self.zero_points = json.loads(zero_points) def call(self, inputs): # add the Q/DQ here - kernel, _, _ = quantization.quantize( - self.kernel, - self.min_value, - self.max_value, - tf.qint8, - axis=1, - mode="SCALED", - ) - kernel = quantization.dequantize( - kernel, - self.min_value, - 
self.max_value, - axis=1, - mode="SCALED", - ) + kernel = tf.raw_ops.UniformQuantize( + input=self.kernel, + scales=self.scales, + zero_points=self.zero_points, + Tout=tf.qint8, + quantization_min_val=-127, + quantization_max_val=128, + quantization_axis=1,) + + kernel = tf.raw_ops.UniformDequantize( + input=kernel, + scales=self.scales, + zero_points=self.zero_points, + Tout=tf.float32, + quantization_min_val=-127, + quantization_max_val=128, + quantization_axis=1,) + outputs = tf.keras.backend.dot(inputs, kernel) if self.use_bias: outputs = tf.keras.backend.bias_add(outputs, self.bias) if self.activation is not None: outputs = self.activation(outputs) + return outputs diff --git a/neural_compressor/adaptor/keras_utils/pool2d.py b/neural_compressor/adaptor/keras_utils/pool2d.py index 409c16b9305..0e01585af1f 100644 --- a/neural_compressor/adaptor/keras_utils/pool2d.py +++ b/neural_compressor/adaptor/keras_utils/pool2d.py @@ -30,15 +30,15 @@ def __init__( strides=None, padding="valid", data_format=None, - min_value=-10000, - max_value=10000, + scales=78.7, + zero_points=0, **kwargs ): super(QAvgPool2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs ) - self.min_value = json.loads(min_value) - self.max_value = json.loads(max_value) + self.scales = json.loads(scales) + self.zero_points = json.loads(zero_points) class QMaxPool2D(MaxPooling2D): @@ -48,12 +48,12 @@ def __init__( strides=None, padding="valid", data_format=None, - min_value=-10000, - max_value=10000, + scales=78.7, + zero_points=0, **kwargs ): super(QMaxPool2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs ) - self.min_value = json.loads(min_value) - self.max_value = json.loads(max_value) + self.scales = json.loads(scales) + self.zero_points = json.loads(zero_points) diff --git a/neural_compressor/adaptor/keras_utils/quantizer.py b/neural_compressor/adaptor/keras_utils/quantizer.py index b395870b48f..0c394c967e4 100644 --- a/neural_compressor/adaptor/keras_utils/quantizer.py +++ b/neural_compressor/adaptor/keras_utils/quantizer.py @@ -135,3 +135,93 @@ def get_config(self): @classmethod def from_config(cls, config): return cls(**config) + + +class UniformQuantize(Layer): + def __init__( + self, + scales, + zero_points, + T="s8", + quantization_axis=-1, + name=None + **kwargs + ): + super(Quantize, self).__init__(**kwargs) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.scales = float(scales) + self.zero_points = float(zero_points) + self.T = T_map[T] + self.quantization_axis = quantization_axis + self.name = name + self.quantization_min_val = -127 if T=="s8" else 0 + self.quantization_max_val = 128 if T=="s8" else 255 + + def call(self, inputs): + outputs = tf.raw_ops.UniformQuantize( + input=inputs, + scales=tf.constant([0.4],dtype=tf.float32, shape=()), + zero_points=tf.constant([0], dtype=tf.int32, shape=()), + Tout=self.T, + quantization_min_val=self.quantization_min_val, + quantization_max_val=self.quantization_max_val, + quantization_axis=self.quantization_axis, + name=self.name) + + return outputs + + def get_config(self): + return { + "scales": self.scales, + "zero_points": self.zero_points, + "T": self.T, + "quantization_axis": self.quantization_axis, + "name": self.name, + } + + @classmethod + def from_config(cls, config): + return cls(**config) + + +class UniformDeQuantize(Layer): + def __init__(self, + scales, + zero_points, + T="s8", + quantization_axis=-1, + name=None + **kwargs): + 
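+        # UniformDeQuantize wraps tf.raw_ops.UniformDequantize as a Keras layer: T selects
+        # the quantized input dtype ("s8" or "u8") and determines the integer clamping
+        # range (quantization_min_val / quantization_max_val) passed to the raw op below.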
super(DeQuantize, self).__init__(**kwargs) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.scales = float(scales) + self.zero_points = float(zero_points) + self.T = T_map[T] + self.quantization_axis = quantization_axis + self.name = name + self.quantization_min_val = -127 if T=="s8" else 0 + self.quantization_max_val = 128 if T=="s8" else 255 + + def call(self, inputs): + return tf.raw_ops.UniformDequantize( + input=inputs, + scales=self.scales, + zero_points=self.zero_points, + Tout=tf.float32, + quantization_min_val=self.quantization_min_val, + quantization_max_val=self.quantization_max_val, + quantization_axis=self.quantization_axis, + name=self.name) + + def get_config(self): + return { + "scales": self.scales, + "zero_points": self.zero_points, + "T": self.T, + "quantization_axis": self.quantization_axis, + "name": self.name, + } + + @classmethod + def from_config(cls, config): + return cls(**config) From bfd0524935b8d9e636f594afdc2e12d4a3c20e02 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Wed, 20 Mar 2024 13:53:08 +0800 Subject: [PATCH 02/25] fix bugs Signed-off-by: zehao-intel --- .../resnet50/quantization/ptq/main.py | 3 ++- neural_compressor/adaptor/keras.py | 25 +++++++++++-------- .../adaptor/keras_utils/quantizer.py | 24 ++++++------------ 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/examples/keras/image_recognition/resnet50/quantization/ptq/main.py b/examples/keras/image_recognition/resnet50/quantization/ptq/main.py index 7c5cc4abdc6..3fedd756ecf 100644 --- a/examples/keras/image_recognition/resnet50/quantization/ptq/main.py +++ b/examples/keras/image_recognition/resnet50/quantization/ptq/main.py @@ -116,7 +116,8 @@ def main(_): from neural_compressor import quantization from neural_compressor.config import PostTrainingQuantConfig conf = PostTrainingQuantConfig(backend='itex', - calibration_sampling_size=[50, 100]) + calibration_sampling_size=[50, 100], + accuracy_criterion = AccuracyCriterion(tolerable_loss=0.1)) q_model = quantization.fit(FLAGS.input_model, conf=conf, calib_dataloader=calib_dataloader, eval_func=evaluate) q_model.save(FLAGS.output_model) diff --git a/neural_compressor/adaptor/keras.py b/neural_compressor/adaptor/keras.py index 2f3a4aec0be..9792b52c8b3 100644 --- a/neural_compressor/adaptor/keras.py +++ b/neural_compressor/adaptor/keras.py @@ -325,8 +325,6 @@ def quantize(self, tune_cfg, model, dataloader, q_func=None): converted_model = self.convert_bf16() return converted_model - if self.backend == "itex": - self._check_itex() logger.debug("Dump quantization configurations:") logger.debug(self.quantize_config) calib_sampling_size = tune_cfg.get("calib_sampling_size", 1) @@ -388,7 +386,7 @@ def quantize(self, tune_cfg, model, dataloader, q_func=None): json_model["config"]["layers"] = q_layers quantized_model = self._restore_model_from_json(json_model) - converted_model = self._calibrate(quantized_model, dataloader, self.quantize_config["calib_iteration"]) + converted_model = self._calibrate_with_uniform_qdq(quantized_model, dataloader, self.quantize_config["calib_iteration"]) from neural_compressor.model.keras_model import KerasModel @@ -533,24 +531,24 @@ def _calibrate_with_uniform_qdq(self, model, dataloader, calib_interation): quantize_layer = { "class_name": "UniformQuantize", - "name": "uniform_quantize_" + str(idx), + "name": "quantize_" + str(idx), "config": { "scales": scales, "zero_points": zero_points, "T": T, "quantization_axis": -1, - "name": "uniform_quantize_" + str(idx), + "name": "quantize_" + str(idx), }, } 
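                # Illustrative example (values assumed, not taken from the patch): for a
                # calibrated range of min=-2.0, max=3.0 with T="s8", the parameters above
                # become zero_points = 0 and scales = max(3.0, 2.0) / 127 ~= 0.0236; with
                # T="u8" they become zero_points = 128 and scales = 3.0 / 255 ~= 0.0118.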
dequantize_layer = { "class_name": "UniformDeQuantize", - "name": "uniform_dequantize_" + str(idx), + "name": "dequantize_" + str(idx), "config": { "scales": scales, "zero_points": zero_points, "T": T, "quantization_axis": -1, - "name": "uniform_dequantize_" + str(idx), + "name": "dequantize_" + str(idx), }, } if "inbound_nodes" in layer: @@ -579,10 +577,15 @@ def _calibrate_with_uniform_qdq(self, model, dataloader, calib_interation): t_dim.extend(dim) channel_size = kernel.shape[-1] kernel_channel = kernel.transpose(t_dim).reshape(channel_size, -1) - min_value = json.dumps(np.min(kernel_channel, axis=1).tolist()) - max_value = json.dumps(np.max(kernel_channel, axis=1).tolist()) - layer_config["scales"] = max(abs(max_value), abs(min_value))/127 - layer_config["zero_points"] = 0 + min_value = np.min(kernel_channel, axis=1).tolist() + max_value = np.max(kernel_channel, axis=1).tolist() + scales = [] + zero_points = [] + for i in range(len(max_value)): + scales.append(max(abs(max_value[i]), abs(min_value[i]))/127) + zero_points.append(0) + layer_config["scales"] = json.dumps(scales) + layer_config["zero_points"] = json.dumps(zero_points) else: # default value, but never expected to be used # cause no kernel weights for this layer diff --git a/neural_compressor/adaptor/keras_utils/quantizer.py b/neural_compressor/adaptor/keras_utils/quantizer.py index 0c394c967e4..f2137d228d5 100644 --- a/neural_compressor/adaptor/keras_utils/quantizer.py +++ b/neural_compressor/adaptor/keras_utils/quantizer.py @@ -144,29 +144,26 @@ def __init__( zero_points, T="s8", quantization_axis=-1, - name=None **kwargs ): - super(Quantize, self).__init__(**kwargs) + super(UniformQuantize, self).__init__(**kwargs) T_map = {"s8": tf.qint8, "u8": tf.quint8} self.scales = float(scales) - self.zero_points = float(zero_points) + self.zero_points = int(zero_points) self.T = T_map[T] self.quantization_axis = quantization_axis - self.name = name self.quantization_min_val = -127 if T=="s8" else 0 self.quantization_max_val = 128 if T=="s8" else 255 def call(self, inputs): outputs = tf.raw_ops.UniformQuantize( input=inputs, - scales=tf.constant([0.4],dtype=tf.float32, shape=()), - zero_points=tf.constant([0], dtype=tf.int32, shape=()), + scales=self.scales, + zero_points=self.zero_points, Tout=self.T, quantization_min_val=self.quantization_min_val, quantization_max_val=self.quantization_max_val, - quantization_axis=self.quantization_axis, - name=self.name) + quantization_axis=self.quantization_axis) return outputs @@ -176,7 +173,6 @@ def get_config(self): "zero_points": self.zero_points, "T": self.T, "quantization_axis": self.quantization_axis, - "name": self.name, } @classmethod @@ -190,15 +186,13 @@ def __init__(self, zero_points, T="s8", quantization_axis=-1, - name=None **kwargs): - super(DeQuantize, self).__init__(**kwargs) + super(UniformDeQuantize, self).__init__(**kwargs) T_map = {"s8": tf.qint8, "u8": tf.quint8} self.scales = float(scales) - self.zero_points = float(zero_points) + self.zero_points = int(zero_points) self.T = T_map[T] self.quantization_axis = quantization_axis - self.name = name self.quantization_min_val = -127 if T=="s8" else 0 self.quantization_max_val = 128 if T=="s8" else 255 @@ -210,8 +204,7 @@ def call(self, inputs): Tout=tf.float32, quantization_min_val=self.quantization_min_val, quantization_max_val=self.quantization_max_val, - quantization_axis=self.quantization_axis, - name=self.name) + quantization_axis=self.quantization_axis) def get_config(self): return { @@ -219,7 +212,6 @@ def 
get_config(self): "zero_points": self.zero_points, "T": self.T, "quantization_axis": self.quantization_axis, - "name": self.name, } @classmethod From f5e6726627e3ccb7dabce3869862b45e3464fce3 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Wed, 20 Mar 2024 14:13:32 +0800 Subject: [PATCH 03/25] fix import Signed-off-by: zehao-intel --- .../keras/image_recognition/resnet50/quantization/ptq/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/keras/image_recognition/resnet50/quantization/ptq/main.py b/examples/keras/image_recognition/resnet50/quantization/ptq/main.py index 3fedd756ecf..19a277f829e 100644 --- a/examples/keras/image_recognition/resnet50/quantization/ptq/main.py +++ b/examples/keras/image_recognition/resnet50/quantization/ptq/main.py @@ -114,7 +114,7 @@ def main(_): set_random_seed(9527) if FLAGS.tune: from neural_compressor import quantization - from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.config import PostTrainingQuantConfig, AccuracyCriterion conf = PostTrainingQuantConfig(backend='itex', calibration_sampling_size=[50, 100], accuracy_criterion = AccuracyCriterion(tolerable_loss=0.1)) From 70cb8d34b66413bdc394df31561eb61c2af279d4 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Fri, 19 Apr 2024 16:58:49 +0800 Subject: [PATCH 04/25] support saved_model out Signed-off-by: zehao-intel --- .../image_recognition/hf_resne50/main.py | 38 ++++ .../quantization/ptq/tune_squad.py | 8 +- neural_compressor/adaptor/keras.py | 4 + neural_compressor/adaptor/tensorflow.py | 4 +- .../adaptor/tf_utils/graph_converter.py | 5 +- .../int8/convert_qdq_to_uniform_qdq.py | 166 ++++++++++++++++++ 6 files changed, 219 insertions(+), 6 deletions(-) create mode 100644 examples/keras/image_recognition/hf_resne50/main.py create mode 100644 neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py diff --git a/examples/keras/image_recognition/hf_resne50/main.py b/examples/keras/image_recognition/hf_resne50/main.py new file mode 100644 index 00000000000..31ac8c8781d --- /dev/null +++ b/examples/keras/image_recognition/hf_resne50/main.py @@ -0,0 +1,38 @@ +from neural_compressor.tensorflow.utils import BaseDataLoader +import tensorflow as tf +from transformers import AutoImageProcessor +from datasets import load_dataset + +dataset = load_dataset("huggingface/cats-image") +image = dataset["test"]["image"][0] + +image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") +input_data = image_processor(image, return_tensors="tf") + +class Dataset(object): + def __init__(self, batch_size=100): + self.length = 100 + self.batch_size = 1 + self.data = [input_data['pixel_values'].numpy()]*100 + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + return self.data[idx][0], None + + +calib_dataloader = BaseDataLoader(dataset=Dataset()) + +from neural_compressor.quantization import fit +from neural_compressor.config import PostTrainingQuantConfig +from neural_compressor import set_random_seed +set_random_seed(9527) +config = PostTrainingQuantConfig(backend='itex', + calibration_sampling_size=[100]) +q_model = fit( + model="resnet50-saved-model/saved_model/1", + conf=config, + calib_dataloader=calib_dataloader, + eval_func=evaluate) +q_model.save("resnet50_uniform_qdq") \ No newline at end of file diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py index 
875852e34ec..d64aa3609f6 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py @@ -137,10 +137,14 @@ def eval(model): from neural_compressor.config import PostTrainingQuantConfig conf = PostTrainingQuantConfig(inputs=['input_ids', 'input_mask', 'segment_ids'], outputs=['start_logits', 'end_logits'], - calibration_sampling_size=[500]) + calibration_sampling_size=[500], + backend="itex") q_model = quantization.fit(FLAGS.input_model, conf=conf, calib_dataloader=dataloader, eval_func=eval) - q_model.save(FLAGS.output_model) + from neural_compressor.model.tensorflow_model import TensorflowSavedModelModel + SMmodel = TensorflowSavedModelModel(qmodel._model) + SMmodel.graph_def = q_model.graph_def + SMmodel.save(FLAGS.output_model) if __name__ == "__main__": tf.compat.v1.app.run() diff --git a/neural_compressor/adaptor/keras.py b/neural_compressor/adaptor/keras.py index 9792b52c8b3..eedc3729752 100644 --- a/neural_compressor/adaptor/keras.py +++ b/neural_compressor/adaptor/keras.py @@ -655,11 +655,15 @@ def _set_weights(self, qmodel, layer_weights): if qlayer.get_weights(): if qlayer.name in layer_weights: qlayer.set_weights(layer_weights[qlayer.name]) + if hasattr(qlayer, "kernel"): + qlayer.kernel = qlayer.kernel.numpy() else: hit_layer = False for sub_layer in qlayer.submodules: if sub_layer.name in layer_weights: qlayer.set_weights(layer_weights[sub_layer.name]) + if hasattr(qlayer, "kernel"): + qlayer.kernel = qlayer.kernel.numpy() hit_layer = True break if not hit_layer: diff --git a/neural_compressor/adaptor/tensorflow.py b/neural_compressor/adaptor/tensorflow.py index 212c233a530..daec595ff47 100644 --- a/neural_compressor/adaptor/tensorflow.py +++ b/neural_compressor/adaptor/tensorflow.py @@ -111,8 +111,8 @@ def __init__(self, framework_specific_info): cfg_yaml_name = "{}.yaml".format(self.__class__.__name__[: -len("Adaptor")].lower()) self.itex_mode = self.backend == "itex" or cfg_yaml_name == "tensorflow_itex.yaml" - if self.itex_mode: - self._check_itex() + # if self.itex_mode: + # self._check_itex() self.query_handler = TensorflowQuery( local_config_file=os.path.join(os.path.dirname(__file__), cfg_yaml_name), diff --git a/neural_compressor/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py index 67c205a646d..4fffbce6994 100644 --- a/neural_compressor/adaptor/tf_utils/graph_converter.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter.py @@ -935,8 +935,9 @@ def _convert_qdq(self): ).do_transformation() self._tmp_graph_def = ShareQDQForItexYPatternOptimizer(self._tmp_graph_def).do_transformation() - self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() - + # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() + from neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer + self._tmp_graph_def = ConvertUniformQDQOptimizer(self._tmp_graph_def).do_transformation() self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) self._tmp_model.graph_def = self._tmp_graph_def self._tmp_model.graph_def.library.CopyFrom(self.model.graph_def.library) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py new file mode 100644 index 
00000000000..8e466e885b3 --- /dev/null +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Fuse QuantizedMatMul with Requantize/Dequantize Graph Rewriter.""" + +import numpy as np +import tensorflow as tf +from tensorflow.core.framework import attr_value_pb2, node_def_pb2 +from tensorflow.python.framework import dtypes, tensor_util + +from neural_compressor.tensorflow.quantization.utils.graph_util import GraphAnalyzer +from neural_compressor.tensorflow.quantization.utils.graph_util import GraphRewriterHelper as Helper +from neural_compressor.tensorflow.utils import version1_gt_version2, version1_lt_version2 + +from ..graph_base import GraphRewriterBase + + +class ConvertUniformQDQOptimizer(GraphRewriterBase): + """Fuse newAPI Quantized MatMul Op with the successor Requantize Op.""" + + def __init__(self, model, device="cpu"): + """Initialization.""" + super().__init__(model) + self.device = device + self.graph_analyzer = GraphAnalyzer() + self.graph_analyzer.graph = self.model + self.eps = 1e-05 + self.graph_info = self.graph_analyzer.parse_graph() + + self.uint8_type = dtypes.quint8.as_datatype_enum + self.int8_type = dtypes.qint8.as_datatype_enum + self.float32_type = dtypes.float32.as_datatype_enum + self.qint32_type = dtypes.qint32.as_datatype_enum + + self.quantization_min_val = None + self.quantization_max_val = None + + def _calculate_zp_and_scale(self, min_value, max_value, dtype): + if dtype == attr_value_pb2.AttrValue(type=self.int8_type): + zp = 0 + scale_range = 127 + self.quantization_min_val = -127 + self.quantization_max_val = 128 + elif dtype == attr_value_pb2.AttrValue(type=self.uint8_type): + zp = 128 + scale_range = 255 + self.quantization_min_val = 0 + self.quantization_max_val = 255 + else: + raise ValueError("Unexpected data type for Quantize Op.") + + if isinstance(max_value, float): + return zp, max(abs(max_value), abs(min_value))/scale_range + + scales = [] + zero_points = [] + for i in range(len(max_value)): + scales.append(max(abs(max_value[i]), abs(min_value[i]))/scale_range) + zero_points.append(zp) + + return zero_points, scales + + def do_transformation(self): + """Fuse the quantized op with the following requantize op. 
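+        This pass replaces each QuantizeV2/Dequantize pair inserted during QDQ conversion
+        with UniformQuantize/UniformDequantize nodes that carry explicit scale and
+        zero-point constant inputs.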
+ + Returns: + [graphdef]: the optimized graphdef object + """ + target_nodes = self.graph_analyzer.query_fusion_pattern_nodes( + [["QuantizeV2"], ["Dequantize"]] + ) + for i in target_nodes: + shared_quantize_node = False + quantize_node_name = i[0] + dequantize_node_name = i[1] + dequantize_node = self.graph_info[dequantize_node_name].node + + quantize_node = self.graph_info[quantize_node_name].node + quantize_min_name = quantize_node.input[1] + quantize_max_name = quantize_node.input[2] + + dtype = quantize_node.attr["T"] + min_value = self.graph_info[quantize_min_name].node.attr["value"].tensor.float_val[0] + max_value = self.graph_info[quantize_max_name].node.attr["value"].tensor.float_val[0] + + zero_point_value, scale_value = self._calculate_zp_and_scale(min_value, max_value, dtype) + zero_point_name = quantize_min_name[:-4] + "zero_point" + scale_name = quantize_min_name[:-4] + "scale" + + zero_point_node = Helper.create_constant_node(zero_point_name, zero_point_value, dtypes.int32, device="cpu") + scale_node = Helper.create_constant_node(scale_name, scale_value, dtypes.float32, device="cpu") + + uniform_quantize_node = node_def_pb2.NodeDef() + uniform_quantize_node.op = "UniformQuantize" + uniform_quantize_node.name = quantize_node_name+"_UniformQuantize" + uniform_quantize_node.input.extend([quantize_node.input[0], scale_name, zero_point_name]) + Helper.set_attr_int(uniform_quantize_node, "quantization_min_val", self.quantization_min_val) + Helper.set_attr_int(uniform_quantize_node, "quantization_max_val", self.quantization_max_val) + Helper.set_attr_dtype(uniform_quantize_node, "Tin", dtypes.float32) + + if "axis" in quantize_node.attr: + uniform_quantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["axis"]) + uniform_quantize_node.attr["Tout"].CopyFrom(quantize_node.attr["T"]) + + uniform_dequantize_node = node_def_pb2.NodeDef() + uniform_dequantize_node.op = "UniformDequantize" + uniform_dequantize_node.name = dequantize_node_name+"_UniformDequantize" + + uniform_dequantize_node.input.extend([uniform_quantize_node.name, + scale_name, + zero_point_name, + ]) + Helper.set_attr_int(uniform_dequantize_node, "quantization_min_val", self.quantization_min_val) + Helper.set_attr_int(uniform_dequantize_node, "quantization_max_val", self.quantization_max_val) + Helper.set_attr_dtype(uniform_dequantize_node, "Tout", dtypes.float32) + + if "quantization_axis" in quantize_node.attr: + uniform_dequantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["quantization_axis"]) + if "Tin" in uniform_quantize_node.attr: + uniform_dequantize_node.attr["Tin"].CopyFrom(uniform_quantize_node.attr["Tout"]) + + parent_node_name = Helper.node_name_from_input(quantize_node.input[0]) + + self.graph_analyzer.add_node(zero_point_node, None, [uniform_quantize_node.name]) + self.graph_analyzer.add_node(scale_node, None, [uniform_quantize_node.name]) + + quantize_output_node_name = set() + for node_name in self.graph_info[quantize_node_name].outputs: + quantize_output_node_name.add(node_name) + self.graph_analyzer.replace_single_node( + uniform_quantize_node, + [parent_node_name], + quantize_node_name, + [i for i in quantize_output_node_name], + quantize_node_name, + ) + + dequantize_output_node_name = set() + for node_name in self.graph_info[dequantize_node_name].outputs: + dequantize_output_node_name.add(node_name) + self.graph_analyzer.replace_single_node( + uniform_dequantize_node, + [uniform_quantize_node.name], + dequantize_node_name, + [i for i in dequantize_output_node_name], + 
dequantize_node_name, + ) + + self.graph_analyzer.remove_node(quantize_node_name) + self.graph_analyzer.remove_node(dequantize_node_name) + + + return self.graph_analyzer.dump_graph() \ No newline at end of file From 5c03b796136bd7bbebd7f307dd774f7a743d3ca6 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Sun, 21 Apr 2024 22:16:21 +0800 Subject: [PATCH 05/25] fix import Signed-off-by: zehao-intel --- .../adaptor/tf_utils/graph_converter.py | 2 +- neural_compressor/model/tensorflow_model.py | 4 +- neural_compressor/tensorflow/utils/data.py | 331 +++++++++++++++--- 3 files changed, 277 insertions(+), 60 deletions(-) diff --git a/neural_compressor/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py index 4fffbce6994..2854a9766d8 100644 --- a/neural_compressor/adaptor/tf_utils/graph_converter.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter.py @@ -936,7 +936,7 @@ def _convert_qdq(self): self._tmp_graph_def = ShareQDQForItexYPatternOptimizer(self._tmp_graph_def).do_transformation() # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() - from neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer + from neural_compressor.adaptor.tf_utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer self._tmp_graph_def = ConvertUniformQDQOptimizer(self._tmp_graph_def).do_transformation() self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) self._tmp_model.graph_def = self._tmp_graph_def diff --git a/neural_compressor/model/tensorflow_model.py b/neural_compressor/model/tensorflow_model.py index e4809863a55..5ef2d5095b2 100644 --- a/neural_compressor/model/tensorflow_model.py +++ b/neural_compressor/model/tensorflow_model.py @@ -81,8 +81,8 @@ def get_model_type(model): return "graph" elif isinstance(model, tf.compat.v1.GraphDef): return "graph_def" - elif isinstance(model, tf.compat.v1.estimator.Estimator): - return "estimator" + # elif isinstance(model, tf.compat.v1.estimator.Estimator): + # return "estimator" elif isinstance(model, str): model = os.path.abspath(os.path.expanduser(model)) if model.endswith(".pb") and os.path.isfile(model): diff --git a/neural_compressor/tensorflow/utils/data.py b/neural_compressor/tensorflow/utils/data.py index 8e0f7dc8cc0..ab50de81fc5 100644 --- a/neural_compressor/tensorflow/utils/data.py +++ b/neural_compressor/tensorflow/utils/data.py @@ -17,16 +17,220 @@ # ============================================================================== """BaseDataloder of all dataloaders.""" +import collections +import math import sys from abc import abstractmethod import numpy as np +import tensorflow as tf from neural_compressor.common import logger +def default_collate(batch): # pragma: no cover + """Merge data with outer dimension batch size.""" + elem = batch[0] + if isinstance(elem, collections.abc.Mapping): + return {key: default_collate([d[key] for d in batch]) for key in elem} + elif isinstance(elem, collections.abc.Sequence): + batch = zip(*batch) + return [default_collate(samples) for samples in batch] + elif isinstance(elem, np.ndarray): + try: + return np.stack(batch) + except: + return batch + else: + return batch + + +class IterableFetcher: + """Iterate to get next batch-size samples as a batch.""" + + def __init__(self, dataset, collate_fn, drop_last, distributed): + """Initialize IterableFetcher. 
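+        The fetcher draws up to batch-size items from the dataset iterator and, when
+        ``distributed`` is True, keeps only the items that belong to the current Horovod rank.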
+ + Args: + dataset (object): dataset object from which to get data + collate_fn (callable): merge data with outer dimension batch size + drop_last (bool): whether to drop the last batch if it is incomplete + distributed (bool): whether the dataloader is distributed + """ + self.dataset = dataset + self.collate_fn = collate_fn + self.drop_last = drop_last + self.dataset_iter = iter(dataset) + self.index_whole = 0 + self.process_rank = 0 # The default rank is 0, which represents the main process + self.process_size = 1 # By default, process_size=1, only the main process is running + if distributed: + import horovod.tensorflow as hvd + + hvd.init() + self.process_rank = hvd.rank() + self.process_size = hvd.size() + if self.process_size < 2: + raise EnvironmentError( + "The program is now trying to traverse" + " the distributed TensorFlow DefaultDataLoader in only one process." + " If you do not want to use distributed DataLoader, please set" + " 'distributed: False'. Or If you want to use distributed DataLoader," + " please set 'distributed: True' and launch multiple processes." + ) + + def __call__(self, batched_indices): + """Fetch data. + + Args: + batched_indices (list): fetch data according to batched_indices + """ + batch_data = [] + batch_size = len(batched_indices) + while True: + try: + iter_data = next(self.dataset_iter) + if (self.index_whole - self.process_rank) % self.process_size == 0: + batch_data.append(iter_data) + self.index_whole += 1 + if len(batch_data) == batch_size: + break + except StopIteration: + break + if len(batch_data) == 0 or (self.drop_last and len(batch_data) < len(batched_indices)): + raise StopIteration + return self.collate_fn(batch_data) + + +class IndexFetcher: + """Take single index or a batch of indices to fetch samples as a batch.""" + + def __init__(self, dataset, collate_fn, drop_last, distributed): + """Initialize IndexFetcher. + + Args: + dataset (object): dataset object from which to get data + collate_fn (callable): merge data with outer dimension batch size + drop_last (bool): whether to drop the last batch if it is incomplete + distributed (bool): whether the dataloader is distributed + """ + self.dataset = dataset + self.collate_fn = collate_fn + self.drop_last = drop_last + + def __call__(self, batched_indices): + """Fetch data. + + Args: + batched_indices (list): fetch data according to batched_indices + """ + data = [self.dataset[idx] for idx in batched_indices] + return self.collate_fn(data) + + +class IterableSampler: + """Internally samples elements. + + Used for datasets retrieved element by iterator. Yield None to act as a placeholder for each iteration. + """ + + def __init__(self, dataset): + """Initialize IterableSampler. + + Args: + dataset (object): dataset object from which to get data + """ + self.whole_dataset = dataset + + def __iter__(self): + """Yield data in iterative order.""" + while True: + yield None + + def __len__(self): + """Return the length of dataset.""" + return len(self.whole_dataset) + + +class SequentialSampler: + """Sequentially samples elements, used for datasets retrieved element by index.""" + + def __init__(self, dataset, distributed): + """Initialize SequentialSampler. 
+ + Args: + dataset (object): dataset object from which to get data + distributed (bool): whether the dataloader is distributed + """ + self.whole_dataset = dataset + self.distributed = distributed + + def __iter__(self): + """Yield data in iterative order.""" + self.process_rank = 0 # The default rank is 0, which represents the main process + self.process_size = 1 # By default, process_size=1, only the main process is running + if self.distributed: + import horovod.tensorflow as hvd + + hvd.init() + self.process_rank = hvd.rank() + self.process_size = hvd.size() + if self.process_size < 2: + raise EnvironmentError( + "The program is now trying to traverse" + " the distributed TensorFlow DefaultDataLoader in only one process." + " If you do not want to use distributed DataLoader, please set" + " 'distributed: False'. Or If you want to use distributed DataLoader," + " please set 'distributed: True' and launch multiple processes." + ) + return iter(range(self.process_rank, len(self.whole_dataset), self.process_size)) + + def __len__(self): + """Return the length of dataset.""" + return len(self.whole_dataset) + + +class BatchSampler: + """Yield a batch of indices and number of batches.""" + + def __init__(self, sampler, batch_size, drop_last=True): + """Initialize BatchSampler. + + Args: + sampler (Sampler): sampler used for generating batches + batch_size (int): size of batch + drop_last (bool, optional): whether to drop the last batch if it is incomplete. Defaults to True. + """ + if isinstance(drop_last, bool): + self.drop_last = drop_last + else: + raise ValueError("last_batch only support bool as input") + + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + + def __iter__(self): + """Yield data in iterative order.""" + batch = [] + for idx in self.sampler: + batch.append(idx) + if len(batch) == self.batch_size: + yield batch + batch = [] + if len(batch) > 0 and not self.drop_last: + yield batch + + def __len__(self): + """Return the number of batches.""" + if self.drop_last: + return len(self.sampler) // self.batch_size + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size + + class BaseDataLoader: # pragma: no cover - """Base class for all DataLoaders. + """Base class for TF DataLoaders. _generate_dataloader is needed to create a dataloader object from the general params like batch_size and sampler. The dynamic batching is just to @@ -46,7 +250,7 @@ def __init__( shuffle=False, distributed=False, ): - """Initialize BaseDataLoader. + """Initialize DefaultDataLoader. Args: dataset (object): dataset from which to load the data @@ -63,73 +267,63 @@ def __init__( distributed (bool, optional): whether the dataloader is distributed. Defaults to False. 
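        Example:
            # minimal usage sketch; MyDataset is a placeholder for any map-style dataset
            # whose items are (data, label) pairs
            dataloader = BaseDataLoader(dataset=MyDataset(), batch_size=32, last_batch="rollover")
            for inputs, labels in dataloader:
                ...  # batches are merged with default_collate when collate_fn is None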
""" self.dataset = dataset - self.collate_fn = collate_fn + self.last_batch = last_batch self.sampler = sampler self.batch_sampler = batch_sampler self.num_workers = num_workers self.pin_memory = pin_memory - self._batch_size = batch_size + self.collate_fn = collate_fn + self.batch_size = batch_size self.shuffle = shuffle self.distributed = distributed - self.last_batch = last_batch self.drop_last = False if last_batch == "rollover" else True + if self.collate_fn is None: + self.collate_fn = default_collate - self.dataloader = self._generate_dataloader( - self.dataset, - batch_size=batch_size, - last_batch=last_batch, - collate_fn=collate_fn, - sampler=sampler, - batch_sampler=batch_sampler, - num_workers=num_workers, - pin_memory=pin_memory, - shuffle=shuffle, - distributed=distributed, - ) - - def batch(self, batch_size, last_batch=None): - """Set batch size for dataloader. - - Args: - batch_size (int): number of samples per batch. - last_batch (str, optional): whether to drop the last batch if it is incomplete. - Support ['rollover', 'discard'], rollover means False, discard means True. - Defaults to None. - """ - self._batch_size = batch_size - if last_batch is not None: - self.last_batch = last_batch - self.dataloader = self._generate_dataloader( - self.dataset, - batch_size, - self.last_batch, - self.collate_fn, - self.sampler, - self.batch_sampler, - self.num_workers, - self.pin_memory, - self.shuffle, - self.distributed, - ) + def batch(self, batch_size, last_batch="rollover"): + """Set batch_size and last_batch.""" + self.batch_size = batch_size + self.last_batch = last_batch @property - def batch_size(self): - """Get dataloader's batch_size. - - Returns: - int: batch_size - """ - return self._batch_size + def dataloader(self): + """Return dataloader.""" + return self def __iter__(self): - """Yield data in iterative order. 
+ """Yield data in iterative order.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + collate_fn=self.collate_fn, + sampler=self.sampler, + batch_sampler=self.batch_sampler, + num_workers=self.num_workers, + pin_memory=self.pin_memory, + shuffle=self.shuffle, + distributed=self.distributed, + ) - Returns: - iterator: iterator for dataloder - """ - return iter(self.dataloader) + def __len__(self): + """Get dataset length.""" + try: + dataset_len = self.dataset.__len__() + except (AttributeError, TypeError): + dataset_len = 0 + for _ in self.dataset: + dataset_len += 1 + except Exception: + raise ValueError( + f"{self.dataset} is invalid, {self.dataset}" + " does not support calculating the length of its dataloader" + ) + if self.drop_last is False: + dataloader_len = math.ceil(dataset_len / self.batch_size) + else: + dataloader_len = math.floor(dataset_len / self.batch_size) + return dataloader_len - @abstractmethod def _generate_dataloader( self, dataset, @@ -143,7 +337,30 @@ def _generate_dataloader( shuffle, distributed, ): - raise NotImplementedError + sampler = self._generate_sampler(dataset, distributed) + self.batch_sampler = BatchSampler(sampler, batch_size, self.drop_last) + + if self.dataset_type == "index": + self.fetcher = IndexFetcher(dataset, collate_fn, self.drop_last, distributed) + elif self.dataset_type == "iter": + self.fetcher = IterableFetcher(dataset, collate_fn, self.drop_last, distributed) + + for batched_indices in self.batch_sampler: + try: + data = self.fetcher(batched_indices) + yield data + except StopIteration: + return + + def _generate_sampler(self, dataset, distributed): + if hasattr(dataset, "__getitem__"): + self.dataset_type = "index" + return SequentialSampler(dataset, distributed) + elif hasattr(dataset, "__iter__"): + self.dataset_type = "iter" + return IterableSampler(dataset) + else: + raise ValueError("dataset type only support (index, iter)") class DummyDataset: # pragma: no cover @@ -385,4 +602,4 @@ def __iter__(self): def __len__(self): """Return the length of dataset.""" - return sys.maxsize + return sys.maxsize \ No newline at end of file From b1ca5385303f12bbe9372c6c6e0bc7499a8efe94 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Mon, 22 Apr 2024 12:27:03 +0800 Subject: [PATCH 06/25] fix issues Signed-off-by: zehao-intel --- .../image_recognition/hf_resne50/main.py | 38 -- .../tensorflow/algorithms/smoother/core.py | 6 +- .../algorithms/static_quant/keras.py | 322 ++++++------ .../algorithms/static_quant/tensorflow.py | 9 +- .../algorithms/static_quant/tensorflow.yaml | 4 +- .../tensorflow/keras/layers/__init__.py | 5 +- .../tensorflow/keras/layers/conv2d.py | 418 ++++++++++++---- .../tensorflow/keras/layers/dense.py | 139 ++++-- .../keras/layers/depthwise_conv2d.py | 472 +++++++++++++----- .../keras/layers/layer_initializer.py | 2 +- .../tensorflow/keras/layers/pool2d.py | 184 ++++++- .../keras/layers/separable_conv2d.py | 454 +++++++++++++---- .../tensorflow/keras/quantization/config.py | 14 +- .../quantization/algorithm_entry.py | 11 +- .../tensorflow/quantization/config.py | 10 +- .../quantization/utils/graph_converter.py | 17 +- .../generic/fuse_pad_with_conv.py | 21 +- .../generic/fuse_pad_with_fp32_conv.py | 20 +- .../int8/convert_qdq_to_uniform_qdq.py | 169 +++++++ .../int8/fuse_matmul_requantize.py | 10 +- .../graph_rewriter/qdq/insert_qdq_pattern.py | 2 + .../quantization/utils/graph_util.py | 3 + .../tensorflow/quantization/utils/utility.py | 90 ++-- 
.../tensorflow/utils/__init__.py | 6 +- .../tensorflow/utils/constants.py | 11 +- neural_compressor/tensorflow/utils/data.py | 2 +- neural_compressor/tensorflow/utils/model.py | 95 ++-- .../tensorflow/utils/model_wrappers.py | 275 +++++----- neural_compressor/tensorflow/utils/utility.py | 2 +- 29 files changed, 1924 insertions(+), 887 deletions(-) delete mode 100644 examples/keras/image_recognition/hf_resne50/main.py create mode 100644 neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py diff --git a/examples/keras/image_recognition/hf_resne50/main.py b/examples/keras/image_recognition/hf_resne50/main.py deleted file mode 100644 index 31ac8c8781d..00000000000 --- a/examples/keras/image_recognition/hf_resne50/main.py +++ /dev/null @@ -1,38 +0,0 @@ -from neural_compressor.tensorflow.utils import BaseDataLoader -import tensorflow as tf -from transformers import AutoImageProcessor -from datasets import load_dataset - -dataset = load_dataset("huggingface/cats-image") -image = dataset["test"]["image"][0] - -image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") -input_data = image_processor(image, return_tensors="tf") - -class Dataset(object): - def __init__(self, batch_size=100): - self.length = 100 - self.batch_size = 1 - self.data = [input_data['pixel_values'].numpy()]*100 - - def __len__(self): - return len(self.data) - - def __getitem__(self, idx): - return self.data[idx][0], None - - -calib_dataloader = BaseDataLoader(dataset=Dataset()) - -from neural_compressor.quantization import fit -from neural_compressor.config import PostTrainingQuantConfig -from neural_compressor import set_random_seed -set_random_seed(9527) -config = PostTrainingQuantConfig(backend='itex', - calibration_sampling_size=[100]) -q_model = fit( - model="resnet50-saved-model/saved_model/1", - conf=config, - calib_dataloader=calib_dataloader, - eval_func=evaluate) -q_model.save("resnet50_uniform_qdq") \ No newline at end of file diff --git a/neural_compressor/tensorflow/algorithms/smoother/core.py b/neural_compressor/tensorflow/algorithms/smoother/core.py index d8c3af164f5..425b05bdcca 100644 --- a/neural_compressor/tensorflow/algorithms/smoother/core.py +++ b/neural_compressor/tensorflow/algorithms/smoother/core.py @@ -28,7 +28,7 @@ from neural_compressor.tensorflow.algorithms.smoother.scaler import SmoothQuantScaler, SmoothQuantScalerLLM from neural_compressor.tensorflow.quantization.config import SmoothQuantConfig from neural_compressor.tensorflow.quantization.utils.graph_util import GraphAnalyzer -from neural_compressor.tensorflow.utils import SPR_BASE_VERSIONS, BaseModel, TensorflowLLMModel, TFConfig +from neural_compressor.tensorflow.utils import SPR_BASE_VERSIONS, BaseModel, TensorflowLLMModel, framework_specific_info class SmoothQuant: @@ -55,8 +55,8 @@ def __init__( self.calib_iteration = calib_iteration self.new_api = tf.version.VERSION in SPR_BASE_VERSIONS - self.device = TFConfig.global_config["device"] - self.itex_mode = TFConfig.global_config["backend"] == "itex" + self.device = framework_specific_info["device"] + self.itex_mode = framework_specific_info["backend"] == "itex" for _, value in self.config.items(): single_config = value diff --git a/neural_compressor/tensorflow/algorithms/static_quant/keras.py b/neural_compressor/tensorflow/algorithms/static_quant/keras.py index 79ed5464a1f..b809bfd6206 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/keras.py +++ b/neural_compressor/tensorflow/algorithms/static_quant/keras.py @@ 
-28,18 +28,15 @@ from neural_compressor.common import logger from neural_compressor.common.utils import DEFAULT_WORKSPACE from neural_compressor.tensorflow.keras.layers import ( - DeQuantize, - FakeQuant, QAvgPool2D, QConv2D, QDense, QDepthwiseConv2D, QMaxPool2D, QSeparableConv2D, - Quantize, ) from neural_compressor.tensorflow.quantization.config import StaticQuantConfig -from neural_compressor.tensorflow.utils import deep_get, dump_elapsed_time +from neural_compressor.tensorflow.utils import deep_get, dump_elapsed_time, version1_gte_version2 class KerasAdaptor: @@ -57,9 +54,6 @@ class KerasAdaptor: ] custom_layers = { - "Quantize": Quantize, - "DeQuantize": DeQuantize, - "FakeQuant": FakeQuant, "QConv2D": QConv2D, "QDepthwiseConv2D": QDepthwiseConv2D, "QSeparableConv2D": QSeparableConv2D, @@ -91,9 +85,13 @@ def __init__(self, framework_specific_info): self.conv_format = {} self.fold_conv = [] + self.keras3 = True if version1_gte_version2(tf.__version__, "2.16.1") else False if not os.path.exists(DEFAULT_WORKSPACE): os.mkdir(DEFAULT_WORKSPACE) - self.tmp_dir = DEFAULT_WORKSPACE + "tmp_model" + self.tmp_dir = ( + (DEFAULT_WORKSPACE + "tmp_model.keras") + if self.keras3 else (DEFAULT_WORKSPACE + "tmp_model") + ) def _check_itex(self): """Check if the IntelĀ® Extension for TensorFlow has been installed.""" @@ -153,12 +151,13 @@ def _check_quantize_format(self, model): for layer in model.layers: layer_name_mapping[layer.name] = layer for node in layer._outbound_nodes: - layer_name = node.outbound_layer.name + layer_name = node.operation.name if self.keras3 else node.outbound_layer.name if layer_name not in input_layer_dict: input_layer_dict[layer_name] = [layer.name] else: input_layer_dict[layer_name].append(layer.name) + for layer in model.layers: if layer.__class__.__name__ in self.supported_op: self.conv_format[layer.name] = "s8" @@ -169,55 +168,54 @@ def _check_quantize_format(self, model): self.conv_format[layer.name] = "u8" break - def _fuse_bn(self, model): - """Fusing Batch Normalization.""" - fuse_bn_model = copy.deepcopy(model) - fp32_layers = fuse_bn_model.layers + def _fuse_bn_keras3(self, fuse_conv_bn, fp32_layers): + fuse_layers = [] + fused_bn_name = "" + for idx, layer in enumerate(fp32_layers): + if hasattr(layer, "_outbound_nodes"): + if layer.name == fused_bn_name: + continue + + if layer.name in self.conv_weights.keys(): + new_outbound_nodes = [] + conv_weight = self.conv_weights[layer.name] + for outbound_node in layer._outbound_nodes: + outbound_layer = outbound_node.operation + if outbound_layer.__class__.__name__ in ("BatchNormalization"): + fused_bn_name = outbound_layer.name + bn_weight = self.bn_weights[fused_bn_name] + self.layer_weights[layer.name] = fuse_conv_bn( + conv_weight, bn_weight, layer.__class__.__name__, outbound_layer.epsilon + ) + self.fold_conv.append(layer.name) + for node in outbound_layer._outbound_nodes: + new_outbound_nodes.append(node) + else: + new_outbound_nodes.append(outbound_node) + layer._outbound_nodes.clear() + for node in new_outbound_nodes: + layer._outbound_nodes.append(node) - def fuse_conv_bn(conv_weight, bn_weight, conv_type="Conv2D", eps=1.0e-5): - assert conv_type in [ - "Conv2D", - "DepthwiseConv2D", - "SeparableConv2D", - ], "only support Conv2D, DepthwiseConv2D, SeparableConv2D..." 
- if len(bn_weight) > 3: - if conv_type == "DepthwiseConv2D": - gamma = bn_weight[0].reshape(1, 1, bn_weight[0].shape[0], 1) - var = bn_weight[3].reshape(1, 1, bn_weight[3].shape[0], 1) - else: - gamma = bn_weight[0].reshape(1, 1, 1, bn_weight[0].shape[0]) - var = bn_weight[3].reshape(1, 1, 1, bn_weight[3].shape[0]) - beta = bn_weight[1] - mean = bn_weight[2] + fuse_layers.append(layer) else: - gamma = 1.0 - beta = bn_weight[0] - mean = bn_weight[1] - if conv_type == "DepthwiseConv2D": - var = bn_weight[2].reshape(1, 1, bn_weight[2].shape[0], 1) + if ( + idx > 0 + and layer.__class__.__name__ == "BatchNormalization" + and fp32_layers[idx - 1].__class__.__name__ == "Conv2D" + ): + conv_name = fp32_layers[idx - 1].name + conv_weight = self.conv_weights[conv_name] + bn_weight = self.bn_weights[layer.name] + conv_type = fp32_layers[idx - 1].__class__.__name__ + + self.layer_weights[conv_name] = fuse_conv_bn(conv_weight, bn_weight, conv_type, layer.epsilon) + self.fold_conv.append(conv_name) else: - var = bn_weight[2].reshape(1, 1, 1, bn_weight[2].shape[0]) + fuse_layers.append(layer) - if len(conv_weight) == 1: - weight = conv_weight[0] - bias = np.zeros_like(beta) - elif len(conv_weight) == 2 and conv_type == "SeparableConv2D": - depth_weight = conv_weight[0] - weight = conv_weight[1] - bias = np.zeros_like(beta) - elif len(conv_weight) == 2 and conv_type != "SeparableConv2D": - weight = conv_weight[0] - bias = conv_weight[1] - elif len(conv_weight) == 3: - depth_weight = conv_weight[0] - weight = conv_weight[1] - bias = conv_weight[2] - scale_value = gamma / np.sqrt(var + eps) - weight = weight * scale_value - bias = beta + (bias - mean) * scale_value.reshape(-1) - bias = bias.reshape(-1) - return [depth_weight, weight, bias] if conv_type == "SeparableConv2D" else [weight, bias] + return fuse_layers + def _fuse_bn_keras2(self, fuse_conv_bn, fp32_layers): fuse_layers = [] for idx, layer in enumerate(fp32_layers): if hasattr(layer, "_inbound_nodes"): @@ -243,12 +241,14 @@ def fuse_conv_bn(conv_weight, bn_weight, conv_type="Conv2D", eps=1.0e-5): else: for bound_node in layer._inbound_nodes: inbound_layer = bound_node.inbound_layers - if ( - not isinstance(inbound_layer, list) - and inbound_layer.name in self.bn_weights.keys() - and inbound_layer._inbound_nodes[0].inbound_layers.name in self.conv_weights.keys() - ): - new_bound_nodes.append(bn_inbound_node) + if inbound_layer in self.bn_weights.keys(): + for bn_inbound_node in inbound_layer._inbound_nodes: + bn_inbound_layer = bn_inbound_node.inbound_layers + if bn_inbound_layer.name in self.conv_weights.keys(): + new_bound_nodes.append(bn_inbound_node) + else: + if bound_node not in new_bound_nodes: + new_bound_nodes.append(bound_node) else: new_bound_nodes.append(bound_node) @@ -274,7 +274,62 @@ def fuse_conv_bn(conv_weight, bn_weight, conv_type="Conv2D", eps=1.0e-5): else: fuse_layers.append(layer) - for idx, layer in enumerate(fuse_layers): + return fuse_layers + + def _fuse_bn(self, model): + """Fusing Batch Normalization.""" + model.save(self.tmp_dir) + fuse_bn_model = tf.keras.models.load_model(self.tmp_dir) + fp32_layers = fuse_bn_model.layers + + def fuse_conv_bn(conv_weight, bn_weight, conv_type="Conv2D", eps=1.0e-5): + assert conv_type in [ + "Conv2D", + "DepthwiseConv2D", + "SeparableConv2D", + ], "only support Conv2D, DepthwiseConv2D, SeparableConv2D..." 
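+            # Fold the BatchNormalization into the preceding convolution: with
+            #   y = gamma * (conv(x, W) + b - mean) / sqrt(var + eps) + beta
+            # the fused parameters are W' = W * gamma / sqrt(var + eps) and
+            # b' = beta + (b - mean) * gamma / sqrt(var + eps), which is what the
+            # scale_value computation below implements.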
+ if len(bn_weight) > 3: + if conv_type == "DepthwiseConv2D": + gamma = bn_weight[0].reshape(1, 1, bn_weight[0].shape[0], 1) + var = bn_weight[3].reshape(1, 1, bn_weight[3].shape[0], 1) + else: + gamma = bn_weight[0].reshape(1, 1, 1, bn_weight[0].shape[0]) + var = bn_weight[3].reshape(1, 1, 1, bn_weight[3].shape[0]) + beta = bn_weight[1] + mean = bn_weight[2] + else: + gamma = 1.0 + beta = bn_weight[0] + mean = bn_weight[1] + if conv_type == "DepthwiseConv2D": + var = bn_weight[2].reshape(1, 1, bn_weight[2].shape[0], 1) + else: + var = bn_weight[2].reshape(1, 1, 1, bn_weight[2].shape[0]) + + if len(conv_weight) == 1: + weight = conv_weight[0] + bias = np.zeros_like(beta) + elif len(conv_weight) == 2 and conv_type == "SeparableConv2D": + depth_weight = conv_weight[0] + weight = conv_weight[1] + bias = np.zeros_like(beta) + elif len(conv_weight) == 2 and conv_type != "SeparableConv2D": + weight = conv_weight[0] + bias = conv_weight[1] + elif len(conv_weight) == 3: + depth_weight = conv_weight[0] + weight = conv_weight[1] + bias = conv_weight[2] + scale_value = gamma / np.sqrt(var + eps) + weight = weight * scale_value + bias = beta + (bias - mean) * scale_value.reshape(-1) + bias = bias.reshape(-1) + return [depth_weight, weight, bias] if conv_type == "SeparableConv2D" else [weight, bias] + + fuse_bn_function = self._fuse_bn_keras3 if self.keras3 else self._fuse_bn_keras2 + fused_layers = fuse_bn_function(fuse_conv_bn, fp32_layers) + + for idx, layer in enumerate(fused_layers): if ( layer.__class__.__name__ in ("Conv2D", "DepthwiseConv2D", "SeparableConv2D") and layer.name in self.fold_conv @@ -284,15 +339,15 @@ def fuse_conv_bn(conv_weight, bn_weight, conv_type="Conv2D", eps=1.0e-5): conv_layer = type(layer).from_config(conv_config) for node in layer._outbound_nodes: conv_layer._outbound_nodes.append(node) - fuse_layers[idx] = conv_layer + fused_layers[idx] = conv_layer bn_surgery = KerasSurgery(model) - bn_fused_model = bn_surgery.fuse_bn_layers(fuse_layers, self.conv_weights.keys()) + bn_fused_model = bn_surgery.fuse_bn_layers(fused_layers, self.conv_weights.keys()) bn_fused_model = self._set_weights(bn_fused_model, self.layer_weights) bn_fused_model.save(self.tmp_dir) bn_fused_model = tf.keras.models.load_model(self.tmp_dir) - + return bn_fused_model @dump_elapsed_time("Pass quantize model") @@ -333,32 +388,31 @@ def quantize(self, quant_config, model, dataloader, iteration, q_func=None): ) ) - fq_layers_dict = {} - fq_output_layers = {} - for idx, layer in enumerate(self.pre_optimized_model.layers): + from neural_compressor.tensorflow.keras.layers import layer_initializer_dict + + q_layer_dict = {} + for layer in self.pre_optimized_model.layers: if layer.__class__.__name__ in self.supported_op and layer.name in self.quantize_config["op_wise_config"]: op_config = self.quantize_config["op_wise_config"][layer.name] - mode = "per_channel" if op_config[0] else "per_tensor" - fake_q_name = "fake_quant_" + str(idx) - fake_q_layer = FakeQuant(name=fake_q_name, T=self.conv_format[layer.name], mode="per_tensor") - fq_layers_dict[layer.name] = [fake_q_layer] - fq_output_layers[fake_q_layer.name] = layer.name - self.pre_optimized_model.save(self.tmp_dir) - - fq_surgery = KerasSurgery(self.pre_optimized_model) - calibration_model = fq_surgery.insert_quant_layers(fq_layers_dict) + granularity = "per_channel" if op_config[0] else "per_tensor" + q_layer_class = "Q" + layer.__class__.__name__ + q_config = {"T": self.conv_format[layer.name], "granularity": granularity} + q_layer = 
layer_initializer_dict[q_layer_class](layer, q_config) + q_layer_dict[layer.name] = q_layer + + calib_surgery = KerasSurgery(self.pre_optimized_model) + calibration_model = calib_surgery.insert_quant_layers(q_layer_dict) calibration_model = self._set_weights(calibration_model, self.layer_weights) quantized_model = self._calibrate( calibration_model, dataloader, self.quantize_config["calib_iteration"], - fq_output_layers, ) return quantized_model - def _calibrate(self, model, dataloader, calib_interation, fq_output_layers): + def _calibrate(self, model, dataloader, calib_interation): """Apply calibration. Args: @@ -371,51 +425,27 @@ def _calibrate(self, model, dataloader, calib_interation, fq_output_layers): # run eagerly to fetch the numpy min/max results = {} model.compile(run_eagerly=True) - for idx, (inputs, labels) in enumerate(dataloader): + for idx, (inputs, _) in enumerate(dataloader): _ = model.predict_on_batch(inputs) - json_model = copy.deepcopy(json.loads(model.to_json())) - config = json_model["config"] - layers = config["layers"] - for layer in layers: - if layer["class_name"] == "FakeQuant": - min_value = layer["config"]["min_value"] - max_value = layer["config"]["max_value"] + for layer in model.layers: + if layer.__class__.__name__[1:] in self.supported_op and layer.name in self.quantize_config["op_wise_config"]: + min_value = layer.act_min_value.numpy() + max_value = layer.act_max_value.numpy() assert min_value < max_value, "The min value must be lower than the max value in quantization." - if layer["config"]["name"] not in results: - results[layer["config"]["name"]] = {"min": [min_value], "max": [max_value]} + if layer.name not in results: + results[layer.name] = {"min": [min_value], "max": [max_value]} else: - results[layer["config"]["name"]]["min"].append(min_value) - results[layer["config"]["name"]]["max"].append(max_value) + results[layer.name]["min"].append(min_value) + results[layer.name]["max"].append(max_value) if idx + 1 == calib_interation: break - qdq_layer_nums = 0 - qdq_layers_dict = {} - quantized_layers_dict = {} for idx, layer in enumerate(model.layers): - if layer.__class__.__name__ == "FakeQuant": - min_value = min(results[layer.name]["min"]) - max_value = max(results[layer.name]["max"]) - - quantize_layer = Quantize( - name="quantize_" + str(qdq_layer_nums), - min_range=min_value, - max_range=max_value, - T=layer.T, - ) - dequantize_layer = DeQuantize( - name="dequantize_" + str(qdq_layer_nums), - min_range=min_value, - max_range=max_value, - ) - - qdq_layer_nums += 1 - output_layer_name = fq_output_layers[layer.name] - qdq_layers_dict[output_layer_name] = [quantize_layer, dequantize_layer] - elif layer.__class__.__name__ in self.supported_op and layer.name in self.quantize_config["op_wise_config"]: - # index 0 is weight, index 1 is bias - q_layer_class = "Q" + layer.__class__.__name__ + if layer.__class__.__name__[1:] in self.supported_op and layer.name in self.quantize_config["op_wise_config"]: + layer.act_min_value = min(results[layer.name]["min"]) + layer.act_max_value = max(results[layer.name]["max"]) + layer.quant_status = "quantize" # for layers that have weights if layer.name in self.layer_weights: kernel = self.layer_weights[layer.name][0] @@ -425,26 +455,17 @@ def _calibrate(self, model, dataloader, calib_interation, fq_output_layers): channel_size = kernel.shape[-1] kernel_channel = kernel.transpose(t_dim).reshape(channel_size, -1) - layer.min_value = np.min(kernel_channel, axis=1).tolist() - layer.max_value = np.max(kernel_channel, 
axis=1).tolist()
+                    layer.weight_min_value = np.min(kernel_channel, axis=1).tolist()
+                    layer.weight_max_value = np.max(kernel_channel, axis=1).tolist()
                 else:
                     # default value, but never expected to be used
                     # cause no kernel weights for this layer
-                    layer.min_value = [-10000]
-                    layer.max_value = [10000]
-
-                from neural_compressor.tensorflow.keras.layers import layer_initializer_dict
-
-                q_layer = layer_initializer_dict[q_layer_class](layer)
-                quantized_layers_dict[layer.name] = q_layer
+                    layer.weight_min_value = [-10000]
+                    layer.weight_max_value = [10000]

-        qdq_surgery = KerasSurgery(self.pre_optimized_model)
-        quantized_model = qdq_surgery.insert_quant_layers(qdq_layers_dict, quantized_layers_dict)
-        quantized_model = self._set_weights(quantized_model, self.layer_weights)
-
-        quantized_model.save(self.tmp_dir)
+        model.save(self.tmp_dir)
         quantized_model = tf.keras.models.load_model(self.tmp_dir)
-
+
         return quantized_model

     @dump_elapsed_time(customized_msg="Model inference")
@@ -456,7 +477,6 @@ def evaluate(
         metrics=None,
         measurer=None,
         iteration=-1,
-        tensorboard=False,
         fp32_baseline=False,
     ):
         """The function is used to run evaluation on validation dataset.
@@ -468,7 +488,6 @@ def evaluate(
            metric (object, optional): Depends on model category. Defaults to None.
            measurer (object, optional): for precise benchmark measurement.
            iteration(int, optional): control steps of mini-batch
-            tensorboard (boolean, optional): for tensorboard inspect tensor.
            fp32_baseline (boolean, optional): only for compare_label=False pipeline
        """
        # use keras object
@@ -584,7 +603,7 @@ def tuning_cfg_to_fw(self, tuning_cfg):
        """Parse tune_config and set framework variables.

        Args:
-            tuning_cfg (dict): The dict of tuning config.
+            tuning_cfg (dict): The dict of tuning config.
        """
        self.quantize_config["calib_iteration"] = tuning_cfg["calib_iteration"]
        self.quantize_config["device"] = self.device
@@ -784,14 +803,20 @@ def __init__(self, model):
            model: the model to be modified.
        """
        self.model_outputs = []
-        self.model = copy.deepcopy(model)
+        self.keras3 = True if version1_gte_version2(tf.__version__, "2.16.1") else False
+        self.tmp_dir = (
+            (DEFAULT_WORKSPACE + "tmp_model.keras")
+            if self.keras3 else (DEFAULT_WORKSPACE + "tmp_model")
+        )
+        model.save(self.tmp_dir)
+        self.model = tf.keras.models.load_model(self.tmp_dir)

    def _create_input_dict(self, fuse_layers=None, conv_weights_keys=None):
        """Create a input_layer_dict from model.

        Args:
-            fuse_layers: The layers in which fused BNs have been excluded, default to be None.
-            conv_weights_keys: The names of conv layers where BNs are going to be fused, default to be None.
+            fuse_layers: The layers in which fused BNs have been excluded, default to be None.
+            conv_weights_keys: The names of conv layers where BNs are going to be fused, default to be None.

        Returns:
            input_layer_dict: The dict that mapping for layer names to their input layer names.
@@ -800,14 +825,15 @@ def _create_input_dict(self, fuse_layers=None, conv_weights_keys=None): layers = fuse_layers if fuse_layers else self.model.layers for layer in layers: for node in layer._outbound_nodes: - out_layer = node.outbound_layer + out_layer = node.operation if self.keras3 else node.outbound_layer out_layer_names = [out_layer.name] if ( conv_weights_keys and out_layer.__class__.__name__ in ("BatchNormalization") and layer.name in conv_weights_keys ): - out_layer_names = [node.outbound_layer.name for node in out_layer._outbound_nodes] + out_layer_names = [node.operation.name for node in out_layer._outbound_nodes] \ + if self.keras3 else [node.outbound_layer.name for node in out_layer._outbound_nodes] for out_layer_name in out_layer_names: if out_layer_name not in input_layer_dict: @@ -840,22 +866,23 @@ def fuse_bn_layers(self, fuse_layers, conv_weights_keys): while isinstance(input_tensors, list) and len(input_tensors) == 1: input_tensors = input_tensors[0] + + if self.keras3: + layer._inbound_nodes.clear() x = layer(input_tensors) output_tensor_dict[layer.name] = x if layer.name in self.model.output_names: self.model_outputs.append(x) - + return tf.keras.models.Model(inputs=self.model.inputs, outputs=self.model_outputs) - def insert_quant_layers(self, qdq_layer_dict, q_layer_dict=None): + def insert_quant_layers(self, q_layer_dict=None): """Insert FakeQuant or QDQ layers before the target layers and replace Keras layers to Quantized layers. Args: - qdq_layer_dict: The dict mapping from layers to be quantized to the FakeQuant layer or QDQ layers - that are going to be inserted before them. q_layer_dict: The dict mapping from layers to be replacement to the quantized layers. """ self.input_layer_dict = self._create_input_dict() @@ -874,14 +901,11 @@ def insert_quant_layers(self, qdq_layer_dict, q_layer_dict=None): while isinstance(input_tensors, list) and len(input_tensors) == 1: input_tensors = input_tensors[0] - if layer.name in qdq_layer_dict: - x = input_tensors - for inserted_layer in qdq_layer_dict[layer.name]: - x = inserted_layer(x) - cur_layer = layer if not q_layer_dict else q_layer_dict[layer.name] - x = cur_layer(x) - else: - x = layer(input_tensors) + if self.keras3: + layer._inbound_nodes.clear() + + cur_layer = q_layer_dict[layer.name] if q_layer_dict and layer.name in q_layer_dict else layer + x = cur_layer(input_tensors) output_tensor_dict[layer.name] = x if layer.name in self.model.output_names: diff --git a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py index 18f514ba306..ffde6730fd4 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py +++ b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py @@ -440,7 +440,6 @@ def _tuning_cfg_to_fw(self, tuning_cfg): if "activation" in tuning_cfg["op"][each_op_info]: is_asymmetric = tuning_cfg["op"][each_op_info]["activation"]["scheme"] == "asym" self.quantize_config["op_wise_config"][op_name] = (is_perchannel, algorithm, is_asymmetric, weight_bit) - self.fp32_ops = fp32_ops self.bf16_ops = bf16_ops @@ -1521,6 +1520,12 @@ def recover_tuned_model(self, model, q_config): return converter.convert_without_calib() + def diagnosis_helper(self, fp32_model, quan_model, tune_cfg, save_path): + """Tensorflow diagnosis helper function.""" + from neural_compressor.tensorflow.quantization.utils.utility import tf_diagnosis_helper + + return tf_diagnosis_helper(fp32_model, quan_model, tune_cfg, save_path) + def 
get_output_op_names(self, qmodel): """Get the oupur OPs's names.""" from neural_compressor.tensorflow.quantization.utils.graph_util import GraphAnalyzer @@ -1751,7 +1756,7 @@ def quantize( tmp_iterations = int(math.ceil(self.calib_sampling_size / calib_batch_size)) calib_dataloader.batch(calib_batch_size) self.quantize_config["calib_iteration"] = tmp_iterations - + converted_model = GraphConverter( model, qt_config=self.quantize_config, diff --git a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml index acd8857eda0..9e9d7e5952c 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml +++ b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml @@ -16,7 +16,7 @@ --- - version: - name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341'] + name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341', '2.16.1'] bf16: ["_MklLayerNorm", "Conv2D", "Conv2DBackpropFilter", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropFilterV2", "Conv3DBackpropInputV2", "DepthwiseConv2dNative", "DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput", "GRUBlockCell", @@ -150,7 +150,7 @@ - version: - name: ['2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.6.1', '2.6.2', '2.7.0', '2.8.0', '2.9.0', '2.9.1', '2.10.0', '2.11.0', '2.12.0', '2.13.0', '2.14.0', '2.14.1', '2.15.0', '2.15.1', '1.15.0-up1', '1.15.0-up2', '1.15.0-up3'] + name: ['2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.6.1', '2.6.2', '2.7.0', '2.8.0', '2.9.0', '2.9.1', '2.10.0', '2.11.0', '2.12.0', '2.13.0', '1.15.0-up1', '1.15.0-up2', '1.15.0-up3'] bf16: ['Conv2D', 'Conv3D', 'MatMul', 'BatchMatMul', 'MaxPool', 'MaxPool3D', 'AvgPool', 'AvgPool3D', 'DepthwiseConv2dNative'] fp32: ['*'] # '*' means all op types diff --git a/neural_compressor/tensorflow/keras/layers/__init__.py b/neural_compressor/tensorflow/keras/layers/__init__.py index 0b4fe9030ac..2abb95fe0f3 100644 --- a/neural_compressor/tensorflow/keras/layers/__init__.py +++ b/neural_compressor/tensorflow/keras/layers/__init__.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright (c) 2024 Intel Corporation +# Copyright (c) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,6 +19,5 @@ from neural_compressor.tensorflow.keras.layers.dense import QDense from neural_compressor.tensorflow.keras.layers.depthwise_conv2d import QDepthwiseConv2D from neural_compressor.tensorflow.keras.layers.pool2d import QAvgPool2D, QMaxPool2D -from neural_compressor.tensorflow.keras.layers.quantizer import DeQuantize, FakeQuant, Quantize from neural_compressor.tensorflow.keras.layers.separable_conv2d import QSeparableConv2D -from neural_compressor.tensorflow.keras.layers.layer_initializer import layer_initializer_dict +from neural_compressor.tensorflow.keras.layers.layer_initializer import layer_initializer_dict \ No newline at end of file diff --git a/neural_compressor/tensorflow/keras/layers/conv2d.py b/neural_compressor/tensorflow/keras/layers/conv2d.py index 0a4852d2027..c81366ea8ef 100644 --- a/neural_compressor/tensorflow/keras/layers/conv2d.py +++ b/neural_compressor/tensorflow/keras/layers/conv2d.py @@ -23,102 +23,332 @@ from neural_compressor.tensorflow.utils import version1_gte_version2 -if version1_gte_version2(tf.__version__, "2.13.0"): +if version1_gte_version2(tf.__version__, "2.16.1"): + from keras import ops + from keras.src.layers.convolutional.base_conv import BaseConv # pylint: disable=E0401 +elif version1_gte_version2(tf.__version__, "2.13.0"): from keras.src.layers.convolutional.base_conv import Conv # pylint: disable=E0401 else: from keras.layers.convolutional.base_conv import Conv # pylint: disable=E0401 +if version1_gte_version2(tf.__version__, "2.16.1"): + class QConv2D(BaseConv): + def __init__( + self, + name, + filters, + kernel_size, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), + groups=1, + activation=None, + use_bias=True, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, + **kwargs + ): + super(QConv2D, self).__init__( + name=name, + rank=2, + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + groups=groups, + activation=activations.get(activation), + use_bias=use_bias, + kernel_initializer=initializers.get(kernel_initializer), + bias_initializer=initializers.get(bias_initializer), + kernel_regularizer=regularizers.get(kernel_regularizer), + bias_regularizer=regularizers.get(bias_regularizer), + activity_regularizer=regularizers.get(activity_regularizer), + kernel_constraint=constraints.get(kernel_constraint), + bias_constraint=constraints.get(bias_constraint), + **kwargs + ) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis -class QConv2D(Conv): - def __init__( - self, - name, - filters, - kernel_size, - strides=(1, 1), - padding="valid", - 
data_format=None, - dilation_rate=(1, 1), - groups=1, - activation=None, - use_bias=True, - kernel_initializer="glorot_uniform", - bias_initializer="zeros", - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - min_value=None, - max_value=None, - **kwargs - ): - super(QConv2D, self).__init__( - name=name, - rank=2, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding=padding, - data_format=data_format, - dilation_rate=dilation_rate, - groups=groups, - activation=activations.get(activation), - use_bias=use_bias, - kernel_initializer=initializers.get(kernel_initializer), - bias_initializer=initializers.get(bias_initializer), - kernel_regularizer=regularizers.get(kernel_regularizer), - bias_regularizer=regularizers.get(bias_regularizer), - activity_regularizer=regularizers.get(activity_regularizer), - kernel_constraint=constraints.get(kernel_constraint), - bias_constraint=constraints.get(bias_constraint), + def call(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + kernel = self.kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + kernel_size = self.kernel.shape[-1] + + if not self.weight_min_value: + self.weight_min_value = [-10000]*kernel_size + if not self.weight_max_value: + self.weight_max_value = [10000]*kernel_size + + # add the Q/DQ here + kernel, _, _ = quantization.quantize( + self.kernel, self.weight_min_value, self.weight_max_value, tf.qint8, axis=3, mode="SCALED" + ) + kernel = quantization.dequantize( + kernel, + self.weight_min_value, + self.weight_max_value, + axis=3, + mode="SCALED", + ) + + outputs = self.convolution_op( + inputs, + kernel, + ) + if self.use_bias: + if self.data_format == "channels_last": + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + + if self.activation is not None: + return self.activation(outputs) + return outputs + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QConv2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + return config + +else: + class QConv2D(Conv): + def 
__init__( + self, + name, + filters, + kernel_size, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), + groups=1, + activation=None, + use_bias=True, + kernel_initializer="glorot_uniform", + bias_initializer="zeros", + kernel_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + bias_constraint=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, **kwargs - ) - self.min_value = min_value - self.max_value = max_value - - def call(self, inputs): - kernel_size = self.kernel.shape[-1] - - if not self.min_value: - self.min_value = [-10000] * kernel_size - if not self.max_value: - self.max_value = [10000] * kernel_size - - # add the Q/DQ here - kernel, _, _ = quantization.quantize( - self.kernel, self.min_value, self.max_value, tf.qint8, axis=3, mode="SCALED" - ) - kernel = quantization.dequantize( - kernel, - self.min_value, - self.max_value, - axis=3, - mode="SCALED", - ) - outputs = tf.keras.backend.conv2d( - inputs, - kernel, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilation_rate=self.dilation_rate, - ) - - if self.use_bias: - outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) - - if self.activation is not None: - return self.activation(outputs) - - return outputs - - @classmethod - def from_config(cls, config): - return cls(**config) - - -def initialize_int8_conv2d(fp32_layer): + ): + super(QConv2D, self).__init__( + name=name, + rank=2, + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + groups=groups, + activation=activations.get(activation), + use_bias=use_bias, + kernel_initializer=initializers.get(kernel_initializer), + bias_initializer=initializers.get(bias_initializer), + kernel_regularizer=regularizers.get(kernel_regularizer), + bias_regularizer=regularizers.get(bias_regularizer), + activity_regularizer=regularizers.get(activity_regularizer), + kernel_constraint=constraints.get(kernel_constraint), + bias_constraint=constraints.get(bias_constraint), + **kwargs + ) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def call(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + kernel = self.kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + 
self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + kernel_size = self.kernel.shape[-1] + + if not self.weight_min_value: + self.weight_min_value = [-10000]*kernel_size + if not self.weight_max_value: + self.weight_max_value = [10000]*kernel_size + + # add the Q/DQ here + kernel, _, _ = quantization.quantize( + self.kernel, self.weight_min_value, self.weight_max_value, tf.qint8, axis=3, mode="SCALED" + ) + kernel = quantization.dequantize( + kernel, + self.weight_min_value, + self.weight_max_value, + axis=3, + mode="SCALED", + ) + outputs = tf.keras.backend.conv2d( + inputs, + kernel, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilation_rate=self.dilation_rate, + ) + + if self.use_bias: + outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) + + if self.activation is not None: + return self.activation(outputs) + + return outputs + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QConv2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + return config + + +def initialize_int8_conv2d(fp32_layer, q_config): kwargs = fp32_layer.get_config() if "name" in kwargs: @@ -155,10 +385,6 @@ def initialize_int8_conv2d(fp32_layer): del kwargs["kernel_constraint"] if "bias_constraint" in kwargs: del kwargs["bias_constraint"] - if "min_value" in kwargs: - del kwargs["min_value"] - if "max_value" in kwargs: - del kwargs["max_value"] return QConv2D( name=fp32_layer.name, @@ -178,7 +404,7 @@ def initialize_int8_conv2d(fp32_layer): activity_regularizer=fp32_layer.activity_regularizer, kernel_constraint=fp32_layer.kernel_constraint, bias_constraint=fp32_layer.bias_constraint, - min_value=fp32_layer.min_value, - max_value=fp32_layer.max_value, + quant_T=q_config["T"], + granularity=q_config["granularity"], **kwargs ) diff --git a/neural_compressor/tensorflow/keras/layers/dense.py b/neural_compressor/tensorflow/keras/layers/dense.py index 61dfda2a2b8..862ce2e249e 100644 --- a/neural_compressor/tensorflow/keras/layers/dense.py +++ b/neural_compressor/tensorflow/keras/layers/dense.py @@ -22,6 +22,7 @@ from tensorflow.keras import activations, backend, constraints, initializers, regularizers from tensorflow.keras.layers import Dense +from neural_compressor.tensorflow.utils import version1_gte_version2 class QDense(Dense): def __init__( @@ -37,8 +38,17 @@ def __init__( activity_regularizer=None, kernel_constraint=None, bias_constraint=None, - min_value=None, - max_value=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, **kwargs ): super(QDense, 
self).__init__( @@ -55,34 +65,74 @@ def __init__( bias_constraint=bias_constraint, **kwargs ) - self.min_value = min_value - self.max_value = max_value + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis def call(self, inputs): - kernel_size = self.kernel.shape[-1] - - if not self.min_value: - self.min_value = [-10000] * kernel_size - if not self.max_value: - self.max_value = [10000] * kernel_size - - # add the Q/DQ here - kernel, _, _ = quantization.quantize( - self.kernel, - self.min_value, - self.max_value, - tf.qint8, - axis=1, - mode="SCALED", - ) + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + kernel = self.kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + kernel_size = self.kernel.shape[-1] + + if not self.weight_min_value: + self.weight_min_value = [-10000] * kernel_size + if not self.weight_max_value: + self.weight_max_value = [10000] * kernel_size + + # add the Q/DQ here + kernel, _, _ = quantization.quantize( + self.kernel, + self.weight_min_value, + self.weight_max_value, + tf.qint8, + axis=1, + mode="SCALED", + ) + kernel = quantization.dequantize( + kernel, + self.weight_min_value, + self.weight_max_value, + axis=1, + mode="SCALED", + ) - kernel = quantization.dequantize( - kernel, - self.min_value, - self.max_value, - axis=1, - mode="SCALED", - ) outputs = tf.keras.backend.dot(inputs, kernel) if self.use_bias: @@ -91,8 +141,33 @@ def call(self, inputs): outputs = self.activation(outputs) return outputs + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QDense, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + + return config + -def initialize_int8_dense(fp32_layer): +def initialize_int8_dense(fp32_layer, q_config): kwargs = fp32_layer.get_config() if "name" in kwargs: @@ -117,10 
+192,6 @@ def initialize_int8_dense(fp32_layer): del kwargs["kernel_constraint"] if "bias_constraint" in kwargs: del kwargs["bias_constraint"] - if "min_value" in kwargs: - del kwargs["min_value"] - if "max_value" in kwargs: - del kwargs["max_value"] q_layer = QDense( name=fp32_layer.name, @@ -134,8 +205,8 @@ def initialize_int8_dense(fp32_layer): activity_regularizer=fp32_layer.activity_regularizer, kernel_constraint=fp32_layer.kernel_constraint, bias_constraint=fp32_layer.bias_constraint, - min_value=fp32_layer.min_value, - max_value=fp32_layer.max_value, + quant_T=q_config["T"], + granularity=q_config["granularity"], **kwargs ) diff --git a/neural_compressor/tensorflow/keras/layers/depthwise_conv2d.py b/neural_compressor/tensorflow/keras/layers/depthwise_conv2d.py index a3e6dd9b2f4..065de7fc7c1 100644 --- a/neural_compressor/tensorflow/keras/layers/depthwise_conv2d.py +++ b/neural_compressor/tensorflow/keras/layers/depthwise_conv2d.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright (c) 2024 Intel Corporation +# Copyright (c) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,130 +23,364 @@ from neural_compressor.tensorflow.utils import version1_gte_version2 -if version1_gte_version2(tf.__version__, "2.13.0"): +if version1_gte_version2(tf.__version__, "2.16.1"): + from keras.src import ops + from keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv # pylint: disable=E0401 +elif version1_gte_version2(tf.__version__, "2.13.0"): from keras.src.layers.convolutional.base_depthwise_conv import DepthwiseConv # pylint: disable=E0401 from keras.src.utils import conv_utils, tf_utils # pylint: disable=E0401 else: from keras.layers.convolutional.base_depthwise_conv import DepthwiseConv # pylint: disable=E0401 from keras.utils import conv_utils, tf_utils # pylint: disable=E0401 +if version1_gte_version2(tf.__version__, "2.16.1"): -class QDepthwiseConv2D(DepthwiseConv): - def __init__( - self, - kernel_size, - strides=(1, 1), - padding="valid", - depth_multiplier=1, - data_format=None, - dilation_rate=(1, 1), - activation=None, - use_bias=True, - depthwise_initializer="glorot_uniform", - bias_initializer="zeros", - depthwise_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - depthwise_constraint=None, - bias_constraint=None, - min_value=None, - max_value=None, - **kwargs - ): - super().__init__( - 2, - kernel_size=kernel_size, - strides=strides, - padding=padding, - depth_multiplier=depth_multiplier, - data_format=data_format, - dilation_rate=dilation_rate, - activation=activation, - use_bias=use_bias, - depthwise_initializer=depthwise_initializer, - bias_initializer=bias_initializer, - depthwise_regularizer=depthwise_regularizer, - bias_regularizer=bias_regularizer, - activity_regularizer=activity_regularizer, - depthwise_constraint=depthwise_constraint, - bias_constraint=bias_constraint, + class QDepthwiseConv2D(BaseDepthwiseConv): + def __init__( + self, + kernel_size, + min_value, + max_value, + strides=(1, 1), + padding="valid", + depth_multiplier=1, + data_format=None, + dilation_rate=(1, 1), + activation=None, + use_bias=True, + depthwise_initializer="glorot_uniform", + bias_initializer="zeros", + depthwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + depthwise_constraint=None, + bias_constraint=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + 
weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, + **kwargs + ): + super().__init__( + 2, + kernel_size=kernel_size, + strides=strides, + padding=padding, + depth_multiplier=depth_multiplier, + data_format=data_format, + dilation_rate=dilation_rate, + activation=activation, + use_bias=use_bias, + depthwise_initializer=depthwise_initializer, + bias_initializer=bias_initializer, + depthwise_regularizer=depthwise_regularizer, + bias_regularizer=bias_regularizer, + activity_regularizer=activity_regularizer, + depthwise_constraint=depthwise_constraint, + bias_constraint=bias_constraint, + **kwargs + ) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def call(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + kernel = self.kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + # add the Q/DQ here + kernel, _, _ = quantization.quantize( + self.kernel, + self.weight_min_value, + self.weight_max_value, + tf.qint8, + axis=3, + mode="SCALED" + ) + kernel = quantization.dequantize( + kernel, + self.weight_min_value, + self.weight_max_value, + axis=3, + mode="SCALED", + ) + + input_channel = self._get_input_channel(inputs.shape) + outputs = ops.depthwise_conv( + inputs, + kernel, + strides=self.strides, + padding=self.padding, + dilation_rate=self.dilation_rate, + data_format=self.data_format, + ) + + if self.use_bias: + if self.data_format == "channels_last": + bias_shape = (1,) * (self.rank + 1) + (self.depth_multiplier * input_channel,) + else: + bias_shape = (1, self.depth_multiplier * input_channel) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + + if self.activation is not None: + return self.activation(outputs) + return outputs + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QConv2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": 
self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + return config + +else: + + class QDepthwiseConv2D(DepthwiseConv): + def __init__( + self, + kernel_size, + min_value, + max_value, + strides=(1, 1), + padding="valid", + depth_multiplier=1, + data_format=None, + dilation_rate=(1, 1), + activation=None, + use_bias=True, + depthwise_initializer="glorot_uniform", + bias_initializer="zeros", + depthwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + depthwise_constraint=None, + bias_constraint=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, **kwargs - ) - self.min_value = min_value - self.max_value = max_value - - def call(self, inputs): - depthwise_kernel_size = self.depthwise_kernel.shape[-1] - - if not self.min_value: - self.min_value = [-10000] * depthwise_kernel_size - if not self.max_value: - self.max_value = [10000] * depthwise_kernel_size - - # add the Q/DQ here - kernel, _, _ = quantization.quantize( - self.depthwise_kernel, self.min_value, self.max_value, tf.qint8, axis=3, mode="SCALED" - ) - kernel = quantization.dequantize( - kernel, - self.min_value, - self.max_value, - axis=3, - mode="SCALED", - ) - outputs = tf.keras.backend.depthwise_conv2d( - inputs, - kernel, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilation_rate=self.dilation_rate, - ) - - if self.use_bias: - outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) - - if self.activation is not None: - return self.activation(outputs) - - return outputs - - @classmethod - def from_config(cls, config): - return cls(**config) - - @tf_utils.shape_type_conversion - def compute_output_shape(self, input_shape): - if self.data_format == "channels_first": - rows = input_shape[2] - cols = input_shape[3] - out_filters = input_shape[1] * self.depth_multiplier - elif self.data_format == "channels_last": - rows = input_shape[1] - cols = input_shape[2] - out_filters = input_shape[3] * self.depth_multiplier - - rows = conv_utils.conv_output_length( - rows, - self.kernel_size[0], - self.padding, - self.strides[0], - self.dilation_rate[0], - ) - cols = conv_utils.conv_output_length( - cols, - self.kernel_size[1], - self.padding, - self.strides[1], - self.dilation_rate[1], - ) - if self.data_format == "channels_first": - return (input_shape[0], out_filters, rows, cols) - elif self.data_format == "channels_last": - return (input_shape[0], rows, cols, out_filters) - - -def initialize_int8_depthwise_conv2d(fp32_layer): + ): + super().__init__( + 2, + kernel_size=kernel_size, + strides=strides, + padding=padding, + depth_multiplier=depth_multiplier, + data_format=data_format, + dilation_rate=dilation_rate, + activation=activation, + use_bias=use_bias, + depthwise_initializer=depthwise_initializer, + bias_initializer=bias_initializer, + depthwise_regularizer=depthwise_regularizer, + bias_regularizer=bias_regularizer, + activity_regularizer=activity_regularizer, + depthwise_constraint=depthwise_constraint, + bias_constraint=bias_constraint, + **kwargs + ) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", 
tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def call(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + depthwise_kernel = self.depthwise_kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + # add the Q/DQ here + depthwise_kernel, _, _ = quantization.quantize( + self.depthwise_kernel, + self.weight_min_value, + self.weight_max_value, + tf.qint8, + axis=3, + mode="SCALED" + ) + depthwise_kernel = quantization.dequantize( + depthwise_kernel, + self.weight_min_value, + self.weight_max_value, + axis=3, + mode="SCALED", + ) + + outputs = tf.keras.backend.depthwise_conv2d( + inputs, + depthwise_kernel, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilation_rate=self.dilation_rate, + ) + + if self.use_bias: + outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) + + if self.activation is not None: + return self.activation(outputs) + + return outputs + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QConv2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + return config + + @tf_utils.shape_type_conversion + def compute_output_shape(self, input_shape): + if self.data_format == "channels_first": + rows = input_shape[2] + cols = input_shape[3] + out_filters = input_shape[1] * self.depth_multiplier + elif self.data_format == "channels_last": + rows = input_shape[1] + cols = input_shape[2] + out_filters = input_shape[3] * self.depth_multiplier + + rows = conv_utils.conv_output_length( + rows, + self.kernel_size[0], + self.padding, + self.strides[0], + self.dilation_rate[0], + ) + cols = conv_utils.conv_output_length( + cols, + self.kernel_size[1], + self.padding, + self.strides[1], + self.dilation_rate[1], + ) + if self.data_format == "channels_first": + return (input_shape[0], out_filters, rows, cols) 
+ elif self.data_format == "channels_last": + return (input_shape[0], rows, cols, out_filters) + +def initialize_int8_depthwise_conv2d(fp32_layer, q_config): kwargs = fp32_layer.get_config() q_name = fp32_layer.name @@ -204,7 +438,7 @@ def initialize_int8_depthwise_conv2d(fp32_layer): activity_regularizer=fp32_layer.activity_regularizer, depthwise_constraint=fp32_layer.depthwise_constraint, bias_constraint=fp32_layer.bias_constraint, - min_value=fp32_layer.min_value, - max_value=fp32_layer.max_value, + quant_T=q_config["T"], + granularity=q_config["granularity"], **kwargs ) diff --git a/neural_compressor/tensorflow/keras/layers/layer_initializer.py b/neural_compressor/tensorflow/keras/layers/layer_initializer.py index d1db0eb3504..99ad7d58d52 100644 --- a/neural_compressor/tensorflow/keras/layers/layer_initializer.py +++ b/neural_compressor/tensorflow/keras/layers/layer_initializer.py @@ -30,4 +30,4 @@ "QDepthwiseConv2D": initialize_int8_depthwise_conv2d, "QConv2D": initialize_int8_conv2d, "QDense": initialize_int8_dense, -} +} \ No newline at end of file diff --git a/neural_compressor/tensorflow/keras/layers/pool2d.py b/neural_compressor/tensorflow/keras/layers/pool2d.py index 05a028ecc83..9daecca1395 100644 --- a/neural_compressor/tensorflow/keras/layers/pool2d.py +++ b/neural_compressor/tensorflow/keras/layers/pool2d.py @@ -31,15 +31,89 @@ def __init__( strides=None, padding="valid", data_format=None, - min_value=-10000, - max_value=10000, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, **kwargs ): super(QAvgPool2D, self).__init__( name=name, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs ) - self.min_value = min_value - self.max_value = max_value + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def __call__(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + elif self.quant_status == "quantize" and not isinstance(inputs, tf.keras.KerasTensor): + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + return super(QAvgPool2D, self).__call__(inputs) + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QAvgPool2D, 
self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + return config class QMaxPool2D(MaxPooling2D): @@ -50,18 +124,92 @@ def __init__( strides=None, padding="valid", data_format=None, - min_value=-10000, - max_value=10000, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, **kwargs ): super(QMaxPool2D, self).__init__( name=name, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs ) - self.min_value = min_value - self.max_value = max_value + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def __call__(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + elif self.quant_status == "quantize" and not isinstance(inputs, tf.keras.KerasTensor): + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + return super(QMaxPool2D, self).__call__(inputs) + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QMaxPool2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + return config -def initialize_int8_avgpool(fp32_layer): +def initialize_int8_avgpool(fp32_layer, q_config): kwargs = fp32_layer.get_config() if "name" in kwargs: @@ -74,10 +222,6 @@ def 
initialize_int8_avgpool(fp32_layer): del kwargs["padding"] if "data_format" in kwargs: del kwargs["data_format"] - if "min_value" in kwargs: - del kwargs["min_value"] - if "max_value" in kwargs: - del kwargs["max_value"] q_layer = QAvgPool2D( name=fp32_layer.name, @@ -85,15 +229,15 @@ def initialize_int8_avgpool(fp32_layer): strides=fp32_layer.strides, padding=fp32_layer.padding, data_format=fp32_layer.data_format, - min_value=fp32_layer.min_value, - max_value=fp32_layer.max_value, + quant_T=q_config["T"], + granularity=q_config["granularity"], **kwargs ) return q_layer -def initialize_int8_maxpool(fp32_layer): +def initialize_int8_maxpool(fp32_layer, q_config): kwargs = fp32_layer.get_config() if "name" in kwargs: @@ -106,10 +250,6 @@ def initialize_int8_maxpool(fp32_layer): del kwargs["padding"] if "data_format" in kwargs: del kwargs["data_format"] - if "min_value" in kwargs: - del kwargs["min_value"] - if "max_value" in kwargs: - del kwargs["max_value"] q_layer = QMaxPool2D( name=fp32_layer.name, @@ -117,8 +257,8 @@ def initialize_int8_maxpool(fp32_layer): strides=fp32_layer.strides, padding=fp32_layer.padding, data_format=fp32_layer.data_format, - min_value=fp32_layer.min_value, - max_value=fp32_layer.max_value, + quant_T=q_config["T"], + granularity=q_config["granularity"], **kwargs ) diff --git a/neural_compressor/tensorflow/keras/layers/separable_conv2d.py b/neural_compressor/tensorflow/keras/layers/separable_conv2d.py index 7df66d9db49..a16f9dd4220 100644 --- a/neural_compressor/tensorflow/keras/layers/separable_conv2d.py +++ b/neural_compressor/tensorflow/keras/layers/separable_conv2d.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright (c) 2024 Intel Corporation +# Copyright (c) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,118 +23,358 @@ from neural_compressor.tensorflow.utils import version1_gte_version2 -if version1_gte_version2(tf.__version__, "2.13.0"): +if version1_gte_version2(tf.__version__, "2.16.1"): + from keras.src import ops + from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv # pylint: disable=E0401 +elif version1_gte_version2(tf.__version__, "2.13.0"): from keras.src.layers.convolutional.base_separable_conv import SeparableConv # pylint: disable=E0401 from keras.src.utils import conv_utils # pylint: disable=E0401 else: from keras.layers.convolutional.base_separable_conv import SeparableConv # pylint: disable=E0401 from keras.utils import conv_utils # pylint: disable=E0401 +if version1_gte_version2(tf.__version__, "2.16.1"): -class QSeparableConv2D(SeparableConv): - def __init__( - self, - name, - filters, - kernel_size, - strides=(1, 1), - padding="valid", - data_format=None, - dilation_rate=(1, 1), - depth_multiplier=1, - activation=None, - use_bias=True, - depthwise_initializer="glorot_uniform", - pointwise_initializer="glorot_uniform", - bias_initializer="zeros", - depthwise_regularizer=None, - pointwise_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - depthwise_constraint=None, - pointwise_constraint=None, - bias_constraint=None, - min_value=None, - max_value=None, - **kwargs - ): - super().__init__( - name=name, - rank=2, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding=padding, - data_format=data_format, - dilation_rate=dilation_rate, - depth_multiplier=depth_multiplier, - activation=activations.get(activation), - use_bias=use_bias, - depthwise_initializer=initializers.get(depthwise_initializer), - pointwise_initializer=initializers.get(pointwise_initializer), - bias_initializer=initializers.get(bias_initializer), - depthwise_regularizer=regularizers.get(depthwise_regularizer), - pointwise_regularizer=regularizers.get(pointwise_regularizer), - bias_regularizer=regularizers.get(bias_regularizer), - activity_regularizer=regularizers.get(activity_regularizer), - depthwise_constraint=constraints.get(depthwise_constraint), - pointwise_constraint=constraints.get(pointwise_constraint), - bias_constraint=constraints.get(bias_constraint), + class QSeparableConv2D(BaseSeparableConv): + def __init__( + self, + filters, + kernel_size, + min_value, + max_value, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), + depth_multiplier=1, + activation=None, + use_bias=True, + depthwise_initializer="glorot_uniform", + pointwise_initializer="glorot_uniform", + bias_initializer="zeros", + depthwise_regularizer=None, + pointwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + depthwise_constraint=None, + pointwise_constraint=None, + bias_constraint=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, + **kwargs + ): + super().__init__( + rank=2, + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + depth_multiplier=depth_multiplier, + activation=activations.get(activation), + use_bias=use_bias, + depthwise_initializer=initializers.get(depthwise_initializer), + pointwise_initializer=initializers.get(pointwise_initializer), + 
bias_initializer=initializers.get(bias_initializer), + depthwise_regularizer=regularizers.get(depthwise_regularizer), + pointwise_regularizer=regularizers.get(pointwise_regularizer), + bias_regularizer=regularizers.get(bias_regularizer), + activity_regularizer=regularizers.get(activity_regularizer), + depthwise_constraint=constraints.get(depthwise_constraint), + pointwise_constraint=constraints.get(pointwise_constraint), + bias_constraint=constraints.get(bias_constraint), + **kwargs + ) + + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def call(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + depthwise_kernel = self.depthwise_kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + # (TODO) it's ugly that we can't get the point_wise min/max here + depthwise_kernel, _, _ = quantization.quantize( + self.depthwise_kernel, + self.weight_min_value, + self.weight_max_value, + tf.qint8, + axis=3, + mode="SCALED" + ) + depthwise_kernel = quantization.dequantize( + depthwise_kernel, + self.weight_min_value, + self.weight_max_value, + axis=3, + mode="SCALED", + ) + + outputs = ops.separable_conv( + inputs, + depthwise_kernel, + self.pointwise_kernel, + strides=self.strides, + padding=self.padding, + dilation_rate=self.dilation_rate, + data_format=self.data_format, + ) + + if self.use_bias: + if self.data_format == "channels_last": + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(self.bias, bias_shape) + outputs += bias + + if self.activation is not None: + return self.activation(outputs) + return outputs + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QConv2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + 
) + + return config + +else: + + class QSeparableConv2D(SeparableConv): + def __init__( + self, + filters, + kernel_size, + min_value, + max_value, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), + depth_multiplier=1, + activation=None, + use_bias=True, + depthwise_initializer="glorot_uniform", + pointwise_initializer="glorot_uniform", + bias_initializer="zeros", + depthwise_regularizer=None, + pointwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + depthwise_constraint=None, + pointwise_constraint=None, + bias_constraint=None, + act_min_value=None, + act_max_value=None, + weight_min_value=None, + weight_max_value=None, + granularity="per_tensor", + quant_status="calib", + quant_mode="SCALED", + quant_T="s8", + quant_round_mode="HALF_AWAY_FROM_ZERO", + quant_narrow_range=False, + quant_axis=None, **kwargs - ) - - self.min_value = min_value - self.max_value = max_value - - def call(self, inputs): - depthwise_kernel_size = self.depthwise_kernel.shape[-1] - - if not self.min_value: - self.min_value = [-10000] * depthwise_kernel_size - if not self.max_value: - self.max_value = [10000] * depthwise_kernel_size - - # TODO it's ugly that we can't get the point_wise min/max here - depthwise_kernel, _, _ = quantization.quantize( - self.depthwise_kernel, self.min_value, self.max_value, tf.qint8, axis=3, mode="SCALED" - ) - depthwise_kernel = quantization.dequantize( - depthwise_kernel, - self.min_value, - self.max_value, - axis=3, - mode="SCALED", - ) - - if self.data_format == "channels_last": - strides = (1,) + self.strides + (1,) - else: - strides = (1, 1) + self.strides - - outputs = tf.compat.v1.nn.separable_conv2d( - inputs, - depthwise_kernel, - self.pointwise_kernel, - strides=strides, - padding=self.padding.upper(), - rate=self.dilation_rate, - data_format=conv_utils.convert_data_format(self.data_format, ndim=4), - ) - - if self.use_bias: - outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) - - if self.activation is not None: - return self.activation(outputs) - - return outputs - - @classmethod - def from_config(cls, config): - return cls(**config) - - -def initialize_int8_separable_conv2d(fp32_layer): + ): + super().__init__( + rank=2, + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + depth_multiplier=depth_multiplier, + activation=activations.get(activation), + use_bias=use_bias, + depthwise_initializer=initializers.get(depthwise_initializer), + pointwise_initializer=initializers.get(pointwise_initializer), + bias_initializer=initializers.get(bias_initializer), + depthwise_regularizer=regularizers.get(depthwise_regularizer), + pointwise_regularizer=regularizers.get(pointwise_regularizer), + bias_regularizer=regularizers.get(bias_regularizer), + activity_regularizer=regularizers.get(activity_regularizer), + depthwise_constraint=constraints.get(depthwise_constraint), + pointwise_constraint=constraints.get(pointwise_constraint), + bias_constraint=constraints.get(bias_constraint), + **kwargs + ) + T_map = {"s8": tf.qint8, "u8": tf.quint8} + self.reverse_T_map = {tf.qint8: "s8", tf.quint8: "u8"} + self.weight_min_value = weight_min_value + self.weight_max_value = weight_max_value + self.act_min_value = act_min_value + self.act_max_value = act_max_value + self.granularity = granularity + self.quant_status= quant_status + self.quant_mode = quant_mode + self.quant_T = T_map[quant_T] + self.quant_round_mode = 
quant_round_mode + self.quant_narrow_range = quant_narrow_range + self.quant_axis = quant_axis + + def call(self, inputs): + if self.quant_status == "calib" and not isinstance(inputs, tf.keras.KerasTensor): + if self.granularity == "per_tensor": + self.act_min_value = tf.math.reduce_min(inputs) + self.act_max_value = tf.math.reduce_max(inputs) + else: + self.act_min_value = tf.math.reduce_min(inputs, axis=self.axis) + self.act_max_value = tf.math.reduce_max(inputs, axis=self.axis) + depthwise_kernel = self.depthwise_kernel + elif self.quant_status == "quantize": + assert self.act_min_value is not None, "Invalid activation min-max values, please check calibration process" + inputs, _, _ = tf.quantization.quantize( + inputs, + self.act_min_value, + self.act_max_value, + self.quant_T, + mode=self.quant_mode, + round_mode=self.quant_round_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + inputs = tf.quantization.dequantize( + inputs, + self.act_min_value, + self.act_max_value, + mode=self.quant_mode, + narrow_range=self.quant_narrow_range, + axis=self.quant_axis, + ) + + # (TODO) it's ugly that we can't get the point_wise min/max here + depthwise_kernel, _, _ = quantization.quantize( + self.depthwise_kernel, + self.weight_min_value, + self.weight_max_value, + tf.qint8, + axis=3, + mode="SCALED" + ) + depthwise_kernel = quantization.dequantize( + depthwise_kernel, + self.weight_min_value, + self.weight_max_value, + axis=3, + mode="SCALED", + ) + + if self.data_format == "channels_last": + strides = (1,) + self.strides + (1,) + else: + strides = (1, 1) + self.strides + + outputs = tf.compat.v1.nn.separable_conv2d( + inputs, + depthwise_kernel, + self.pointwise_kernel, + strides=strides, + padding=self.padding.upper(), + rate=self.dilation_rate, + data_format=conv_utils.convert_data_format(self.data_format, ndim=4), + ) + + if self.use_bias: + outputs = tf.keras.backend.bias_add(outputs, self.bias, data_format=self.data_format) + + if self.activation is not None: + return self.activation(outputs) + + return outputs + + @classmethod + def from_config(cls, config): + return cls(**config) + + def get_config(self): + config = super(QConv2D, self).get_config() + config.update( + { + "act_min_value": self.act_min_value, + "act_max_value": self.act_max_value, + "weight_min_value": self.weight_min_value, + "weight_max_value": self.weight_max_value, + "granularity": self.granularity, + "quant_status": self.quant_status, + "quant_mode": self.quant_mode, + "quant_T": self.reverse_T_map[self.quant_T], + "quant_round_mode": self.quant_round_mode, + "quant_narrow_range": self.quant_narrow_range, + "quant_axis": self.quant_axis, + } + ) + + return config + + +def initialize_int8_separable_conv2d(fp32_layer, q_config): kwargs = fp32_layer.get_config() if "name" in kwargs: @@ -203,7 +443,7 @@ def initialize_int8_separable_conv2d(fp32_layer): depthwise_constraint=fp32_layer.depthwise_constraint, pointwise_constraint=fp32_layer.pointwise_constraint, bias_constraint=fp32_layer.bias_constraint, - min_value=fp32_layer.min_value, - max_value=fp32_layer.max_value, + quant_T=q_config["T"], + granularity=q_config["granularity"], **kwargs ) diff --git a/neural_compressor/tensorflow/keras/quantization/config.py b/neural_compressor/tensorflow/keras/quantization/config.py index ae532dc63c4..0581ba74381 100644 --- a/neural_compressor/tensorflow/keras/quantization/config.py +++ b/neural_compressor/tensorflow/keras/quantization/config.py @@ -114,13 +114,13 @@ def register_supported_configs(cls) -> 
List[OperatorConfig]: def get_model_info(model) -> List[Tuple[str, Callable]]: white_list = [ "Dense", - "Conv2D", - "DepthwiseConv2D", - "SeparableConv2D", - "AvgPool2D", - "AveragePooling2D", - "MaxPool2D", - "MaxPooling2D", + # "Conv2d", + # "DepthwiseConv2D", + # "SeparableConv2D", + # "AvgPool2D", + # "AveragePooling2D", + # "MaxPool2D", + # "MaxPooling2D", ] filter_result = [] diff --git a/neural_compressor/tensorflow/quantization/algorithm_entry.py b/neural_compressor/tensorflow/quantization/algorithm_entry.py index a0baf490390..f32f769e55b 100644 --- a/neural_compressor/tensorflow/quantization/algorithm_entry.py +++ b/neural_compressor/tensorflow/quantization/algorithm_entry.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024 Intel Corporation +# Copyright (c) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,9 +17,9 @@ from neural_compressor.common.base_config import BaseConfig from neural_compressor.common.utils import SMOOTH_QUANT, STATIC_QUANT -from neural_compressor.tensorflow.algorithms import KerasAdaptor, Tensorflow_ITEXAdaptor, TensorFlowAdaptor +from neural_compressor.tensorflow.algorithms import KerasAdaptor, TensorFlowAdaptor, Tensorflow_ITEXAdaptor from neural_compressor.tensorflow.quantization.config import SmoothQuantConfig -from neural_compressor.tensorflow.utils import BaseModel, KerasModel, TFConfig, register_algo +from neural_compressor.tensorflow.utils import BaseModel, KerasModel, framework_specific_info, register_algo @register_algo(name=STATIC_QUANT) @@ -42,14 +42,13 @@ def static_quant_entry( """ if isinstance(model, KerasModel): framework = KerasAdaptor - elif TFConfig.global_config["backend"] == "itex": + elif framework_specific_info["backend"] == "itex": framework = Tensorflow_ITEXAdaptor else: framework = TensorFlowAdaptor - quantizer = framework(TFConfig.global_config) + quantizer = framework(framework_specific_info) q_model = quantizer.quantize(quant_config, model, calib_dataloader, calib_iteration) - TFConfig.reset_global_config() return q_model diff --git a/neural_compressor/tensorflow/quantization/config.py b/neural_compressor/tensorflow/quantization/config.py index a49832b6bad..46aebaa2c46 100644 --- a/neural_compressor/tensorflow/quantization/config.py +++ b/neural_compressor/tensorflow/quantization/config.py @@ -107,11 +107,11 @@ def __init__( def register_supported_configs(cls) -> List[OperatorConfig]: supported_configs = [] static_quant_config = StaticQuantConfig( - weight_dtype=["int8", "bf16", "fp32"], + weight_dtype=["int8", "fp32"], weight_sym=[True, False], weight_granularity=["per_tensor", "per_channel"], weight_algorithm=["minmax", "kl"], - act_dtype=["int8", "bf16", "fp32"], + act_dtype=["int8", "fp32"], act_sym=[True, False], act_granularity=["per_tensor", "per_channel"], act_algorithm=["minmax", "kl"], @@ -137,11 +137,10 @@ def register_supported_configs(cls) -> List[OperatorConfig]: @staticmethod def get_model_info(model) -> List[Tuple[str, Callable]]: white_list = [ + "MatMul", "Conv2D", - "FusedBatchNormV3", "Conv3D", "_MklFusedInstanceNorm", - "MatMul", "BatchMatMul", "BatchMatMulV2", "DepthwiseConv2dNative", @@ -151,8 +150,9 @@ def get_model_info(model) -> List[Tuple[str, Callable]]: "MaxPool", "MaxPool3D", "AvgPool", + "_MklFusedInstanceNorm", "Conv2DBackpropInput", - "Conv3DBackpropInputV2", + "Conv2DBackpropInputV2", ] filter_result = [] for node in model.graph_def.node: diff --git 
a/neural_compressor/tensorflow/quantization/utils/graph_converter.py b/neural_compressor/tensorflow/quantization/utils/graph_converter.py index 30295005686..4a503b864de 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_converter.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_converter.py @@ -243,6 +243,7 @@ def _inference(self, model): # INC needs turn off ITEX optimization pass in calibration stage. # TODO ITEX will provide API to replace setting environment variable. os.environ["ITEX_REMAPPER"] = "0" + sess = model.sess iter_op = model.iter_op input_tensor = model.input_tensor @@ -328,6 +329,9 @@ def check_shape(tensor, data): os.environ["ITEX_REMAPPER"] = "1" def _inference_llm(self, model): + logger.info("Start sampling on calibration dataset.") + f=tf.io.gfile.GFile('calib_qdq.pb','wb') + f.write(model.graph_def.SerializeToString()) input_tensor_names = model.input_tensor_names auto_trackable = model.model infer = auto_trackable.signatures["serving_default"] @@ -340,7 +344,7 @@ def _inference_llm(self, model): for i, input_tensor_name in enumerate(input_tensor_names): feed_dict[input_tensor_name] = inputs[i] - _ = infer(**feed_dict) + pred = infer(**feed_dict) if idx >= self.calib_iteration: break @@ -870,7 +874,7 @@ def _insert_qdq_pairs(self): ) self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) - + # Find out the quantized nodes self.quantized_node_info = OptimizeQDQGraph( self._tmp_graph_def, @@ -898,6 +902,7 @@ def _insert_qdq_pairs(self): # TODO: this is a workaround to make Min/Max node be completely eliminated in int8 graph # after enabling pad+conv2d in new API. non_pad_ops = list(list(set(self.fp32_ops).union(set(self.bf16_ops)))) + sampling_graph_def = FusePadWithFP32Conv2DOptimizer( sampling_graph_def, non_pad_ops, self._tmp_model.input_node_names, self.op_wise_config, self.new_api, True ).do_transformation() @@ -912,6 +917,7 @@ def _insert_qdq_pairs(self): sampling_graph_def.library.CopyFrom(self.model.graph_def.library) self._sampling_model.graph_def = sampling_graph_def self._sampling_model.output_tensor_names = output_tensor_names + tmp_dump_file = tempfile.mkstemp(suffix=".log")[1] with CaptureOutputToFile(tmp_dump_file): self._inference(self._sampling_model) @@ -944,7 +950,7 @@ def _insert_qdq_pairs(self): def _convert_qdq(self): """Convert Dequantize + Op + QuantizeV2 into QuantizedOps.""" - if self.itex_mode: + if self.itex_mode or self._tmp_model.model_type=="llm_saved_model": self._tmp_graph_def, quantizev2_max = FreezeValueTransformer( self._tmp_graph_def, self._calibration_data, "__max:", self.itex_mode ).do_transformation() @@ -969,8 +975,9 @@ def _convert_qdq(self): ).do_transformation() self._tmp_graph_def = ShareQDQForItexYPatternOptimizer(self._tmp_graph_def).do_transformation() - self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() - + # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() + from neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer + self._tmp_graph_def = ConvertUniformQDQOptimizer(self._tmp_graph_def).do_transformation() self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) self._tmp_model.graph_def = self._tmp_graph_def self._tmp_model.graph_def.library.CopyFrom(self.model.graph_def.library) diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv.py 
b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv.py index e894ee8d3cb..abedcab5445 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv.py @@ -49,6 +49,7 @@ def do_transformation(self): ) padding_tensor_dict = {} + for node_combination in target_nodes: conv_name = node_combination[1] @@ -59,16 +60,16 @@ def do_transformation(self): is_perchannel = self.cfg[conv_name][0] - # Line 55 to line 65 should be removed once the TFDO enabling the single quantized - # conv2D supporting. - if len(pattern) == 2: - # TODO we need to enable single quantizedconv2d with s8 input. - if not is_perchannel and not cur_graph.has_positive_input(conv_name): - continue - # TFDO has the limitation that the single QuantizedConv2DPerchannel doesn't - # support padding_list filed. - if is_perchannel: - continue + # # Line 55 to line 65 should be removed once the TFDO enabling the single quantized + # # conv2D supporting. + # if len(pattern) == 2: + # # TODO we need to enable single quantizedconv2d with s8 input. + # if not is_perchannel and not cur_graph.has_positive_input(conv_name): + # continue + # # TFDO has the limitation that the single QuantizedConv2DPerchannel doesn't + # # support padding_list filed. + # if is_perchannel: + # continue if conv_name in self.excluded_conv: continue diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv.py index c3cf5a4b62c..97a823d72df 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv.py @@ -59,16 +59,16 @@ def do_transformation(self): is_perchannel = self.cfg[conv_name][0] - # Line 55 to line 65 should be removed once the TFDO enabling the single quantized - # conv2D supporting. - if len(pattern) == 2: - # TODO we need to enable single quantizedconv2d with s8 input. - if not is_perchannel and not cur_graph.has_positive_input(conv_name): - continue - # TFDO has the limitation that the single QuantizedConv2DPerchannel doesn't - # support padding_list filed. - if is_perchannel: - continue + # # Line 55 to line 65 should be removed once the TFDO enabling the single quantized + # # conv2D supporting. + # if len(pattern) == 2: + # # TODO we need to enable single quantizedconv2d with s8 input. + # if not is_perchannel and not cur_graph.has_positive_input(conv_name): + # continue + # # TFDO has the limitation that the single QuantizedConv2DPerchannel doesn't + # # support padding_list filed. + # if is_perchannel: + # continue if conv_name in self.excluded_conv: continue diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py new file mode 100644 index 00000000000..161f0082ec9 --- /dev/null +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Fuse QuantizedMatMul with Requantize/Dequantize Graph Rewriter.""" + +import numpy as np +import tensorflow as tf +from tensorflow.core.framework import attr_value_pb2, node_def_pb2 +from tensorflow.python.framework import dtypes, tensor_util + +from neural_compressor.tensorflow.quantization.utils.graph_util import GraphAnalyzer +from neural_compressor.tensorflow.quantization.utils.graph_util import GraphRewriterHelper as Helper +from neural_compressor.tensorflow.utils import version1_gt_version2, version1_lt_version2 + +from ..graph_base import GraphRewriterBase + + +class ConvertUniformQDQOptimizer(GraphRewriterBase): + """Fuse newAPI Quantized MatMul Op with the successor Requantize Op.""" + + def __init__(self, model, device="cpu"): + """Initialization.""" + super().__init__(model) + self.device = device + self.graph_analyzer = GraphAnalyzer() + self.graph_analyzer.graph = self.model + self.eps = 1e-05 + self.graph_info = self.graph_analyzer.parse_graph() + + self.uint8_type = dtypes.quint8.as_datatype_enum + self.int8_type = dtypes.qint8.as_datatype_enum + self.float32_type = dtypes.float32.as_datatype_enum + self.qint32_type = dtypes.qint32.as_datatype_enum + + self.quantization_min_val = None + self.quantization_max_val = None + + def _calculate_zp_and_scale(self, min_value, max_value, dtype): + if dtype == attr_value_pb2.AttrValue(type=self.int8_type): + zp = 0 + scale_range = 127 + self.quantization_min_val = -127 + self.quantization_max_val = 128 + elif dtype == attr_value_pb2.AttrValue(type=self.uint8_type): + zp = 128 + scale_range = 255 + self.quantization_min_val = 0 + self.quantization_max_val = 255 + else: + raise ValueError("Unexpected data type for Quantize Op.") + + if isinstance(max_value, float): + return zp, max(abs(max_value), abs(min_value))/scale_range + + scales = [] + zero_points = [] + for i in range(len(max_value)): + scales.append(max(abs(max_value[i]), abs(min_value[i]))/scale_range) + zero_points.append(zp) + + return zero_points, scales + + def do_transformation(self): + """Fuse the quantized op with the following requantize op. 
+ + Returns: + [graphdef]: the optimized graphdef object + """ + target_nodes = self.graph_analyzer.query_fusion_pattern_nodes( + [["QuantizeV2"], ["Dequantize"]] + ) + for i in target_nodes: + shared_quantize_node = False + quantize_node_name = i[0] + dequantize_node_name = i[1] + dequantize_node = self.graph_info[dequantize_node_name].node + # if quantize_node_name in self.graph_info: + quantize_node = self.graph_info[quantize_node_name].node + quantize_min_name = quantize_node.input[1] + quantize_max_name = quantize_node.input[2] + + dtype = quantize_node.attr["T"] + + min_value = self.graph_info[quantize_min_name].node.attr["value"].tensor.float_val[0] + max_value = self.graph_info[quantize_max_name].node.attr["value"].tensor.float_val[0] + + zero_point_value, scale_value = self._calculate_zp_and_scale(min_value, max_value, dtype) + zero_point_name = quantize_min_name[:-4] + "zero_point" + scale_name = quantize_min_name[:-4] + "scale" + + zero_point_node = Helper.create_constant_node(zero_point_name, zero_point_value, dtypes.int32, device="cpu") + scale_node = Helper.create_constant_node(scale_name, scale_value, dtypes.float32, device="cpu") + + uniform_quantize_node = node_def_pb2.NodeDef() + uniform_quantize_node.op = "UniformQuantize" + uniform_quantize_node.name = quantize_node_name+"_UniformQuantize" + uniform_quantize_node.input.extend([quantize_node.input[0], scale_name, zero_point_name]) + Helper.set_attr_int(uniform_quantize_node, "quantization_min_val", self.quantization_min_val) + Helper.set_attr_int(uniform_quantize_node, "quantization_max_val", self.quantization_max_val) + Helper.set_attr_dtype(uniform_quantize_node, "Tin", dtypes.float32) + + if "axis" in quantize_node.attr: + uniform_quantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["axis"]) + uniform_quantize_node.attr["Tout"].CopyFrom(quantize_node.attr["T"]) + + + uniform_dequantize_node = node_def_pb2.NodeDef() + uniform_dequantize_node.op = "UniformDequantize" + uniform_dequantize_node.name = dequantize_node_name+"_UniformDequantize" + + + uniform_dequantize_node.input.extend([uniform_quantize_node.name, + scale_name, + zero_point_name, + ]) + Helper.set_attr_int(uniform_dequantize_node, "quantization_min_val", self.quantization_min_val) + Helper.set_attr_int(uniform_dequantize_node, "quantization_max_val", self.quantization_max_val) + Helper.set_attr_dtype(uniform_dequantize_node, "Tout", dtypes.float32) + + if "quantization_axis" in quantize_node.attr: + uniform_dequantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["quantization_axis"]) + if "Tin" in uniform_quantize_node.attr: + uniform_dequantize_node.attr["Tin"].CopyFrom(uniform_quantize_node.attr["Tout"]) + # if not shared_quantize_node: + parent_node_name = Helper.node_name_from_input(quantize_node.input[0]) + + self.graph_analyzer.add_node(zero_point_node, None, [uniform_quantize_node.name]) + self.graph_analyzer.add_node(scale_node, None, [uniform_quantize_node.name]) + + quantize_output_node_name = set() + for node_name in self.graph_info[quantize_node_name].outputs: + quantize_output_node_name.add(node_name) + self.graph_analyzer.replace_single_node( + uniform_quantize_node, + [parent_node_name], + quantize_node_name, + [i for i in quantize_output_node_name], + quantize_node_name, + ) + + dequantize_output_node_name = set() + for node_name in self.graph_info[dequantize_node_name].outputs: + dequantize_output_node_name.add(node_name) + self.graph_analyzer.replace_single_node( + uniform_dequantize_node, + 
[uniform_quantize_node.name], + dequantize_node_name, + [i for i in dequantize_output_node_name], + dequantize_node_name, + ) + + self.graph_analyzer.remove_node(quantize_node_name) + self.graph_analyzer.remove_node(dequantize_node_name) + + + return self.graph_analyzer.dump_graph() diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize.py index 2fef260b500..27b3b998024 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize.py @@ -263,14 +263,10 @@ def do_transformation(self): weight_node = self.graph_info[new_node.input[1]].node bias_node = self.graph_info[new_node.input[2]].node + max_input_node = self.graph_info[last_node.input[-1]].node + min_input_node = self.graph_info[last_node.input[-2]].node - max_input_node = None - min_input_node = None - if last_node.op.find("Requantize") != -1 or last_node.op.find("QuantizeV2") != -1: - max_input_node = self.graph_info[last_node.input[-1]].node - min_input_node = self.graph_info[last_node.input[-2]].node - - if max_input_node and max_input_node.op == "Enter": # pragma: no cover + if max_input_node.op == "Enter": # pragma: no cover min_input_parent_name = Helper.node_name_from_input(min_input_node.input[0]) max_input_parent_name = Helper.node_name_from_input(max_input_node.input[0]) min_input_parent_node = self.graph_info[min_input_parent_name].node diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern.py index 64fc4a69dac..949246fead8 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern.py @@ -551,6 +551,8 @@ def _insert_qdq_pattern_for_weight_node( min_value = -range_value max_value = range_value elif weight_node.op == "ReadVariableOp": + if not self.llm_weight_minmax: + return min_value = self.llm_weight_minmax[weight_node.name][0] max_value = self.llm_weight_minmax[weight_node.name][1] min_value *= range_coefficent diff --git a/neural_compressor/tensorflow/quantization/utils/graph_util.py b/neural_compressor/tensorflow/quantization/utils/graph_util.py index 3eb99baeccf..b8d1ae91f9d 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_util.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_util.py @@ -541,6 +541,8 @@ def replace_single_node( self.node_name_details[each_input_node_name].node.ClearField("input") self.node_name_details[each_input_node_name].node.input.extend(new_input_name) + + def replace_node(self, new_node, old_node_name, output_nodes_name): """Replace the node into the internal data structure node_name_details. 
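For clarity, the per-tensor arithmetic used by ConvertUniformQDQOptimizer._calculate_zp_and_scale above maps a calibrated (min, max) pair to a fixed zero point and a symmetric scale: s8 uses zero_point 0 with denominator 127, u8 uses zero_point 128 with denominator 255. A small standalone sketch with an illustrative value:

def zp_and_scale(min_value, max_value, dtype="s8"):
    # Mirrors the scalar (per-tensor) branch: symmetric scale from the larger
    # absolute bound, fixed zero point per dtype.
    zero_point, denom = (0, 127) if dtype == "s8" else (128, 255)
    scale = max(abs(min_value), abs(max_value)) / denom
    return zero_point, scale

# Example: an activation calibrated to [-2.0, 6.35] quantized as u8.
zero_point, scale = zp_and_scale(-2.0, 6.35, dtype="u8")
print(zero_point, scale)  # 128, 6.35 / 255 ~= 0.0249

The resulting scale and zero_point are then materialized as Const nodes and wired in as the second and third inputs of the new UniformQuantize/UniformDequantize nodes, replacing the min/max Const inputs of the original QuantizeV2/Dequantize pair.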
@@ -705,6 +707,7 @@ def parse_graph(self, input_graph_def=None): for each_input in node_details.node.input: self.node_name_details[GraphRewriterHelper.node_name_from_input(each_input)].outputs.append(node_name) + return self.node_name_details diff --git a/neural_compressor/tensorflow/quantization/utils/utility.py b/neural_compressor/tensorflow/quantization/utils/utility.py index 84ae1fb1915..ac6ceb54324 100644 --- a/neural_compressor/tensorflow/quantization/utils/utility.py +++ b/neural_compressor/tensorflow/quantization/utils/utility.py @@ -37,6 +37,21 @@ from neural_compressor.tensorflow.quantization.utils.graph_util import GraphAnalyzer, GraphRewriterHelper +def disable_random(seed=1): + """A Decorator to disable tf random seed.""" + + def decorator(func): + def wrapper(*args, **kw): + tf.compat.v1.disable_eager_execution() + tf.compat.v1.reset_default_graph() + tf.compat.v1.set_random_seed(seed) + return func(*args, **kw) + + return wrapper + + return decorator + + def read_graph(in_graph, in_graph_is_binary=True): """Reads input graph file as GraphDef. @@ -308,45 +323,6 @@ def strip_unused_nodes(graph_def, input_node_names, output_node_names): return tf.compat.v1.graph_util.extract_sub_graph(cur_graph.dump_graph(), output_node_names) -def get_estimator_graph(estimator, input_fn): - """Get the graph of the estimator. - - Args: - estimator: tf estimator model - input_fn: input function - - Returns: - graph - """ - with tf.Graph().as_default() as g: - features, input_hooks = estimator._get_features_from_input_fn(input_fn, tf.estimator.ModeKeys.PREDICT) - estimator_spec = estimator._call_model_fn(features, None, tf.estimator.ModeKeys.PREDICT, estimator.config) - - outputs = ( - [tensor.name for tensor in estimator_spec.predictions.values()] - if isinstance(estimator_spec.predictions, dict) - else [estimator_spec.predictions.name] - ) - logger.info("Estimator output tensor names is {}.".format(outputs)) - with tf.compat.v1.Session(graph=g) as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - # Freezing a graph requires output_node_names, which can be found in - # estimator_spec.predictions that contains prediction tensors as a - # dictionary - # When a model uses Iterator, we need to have 'MakeIterator' (default - # name used by TF) in the output_node_names as well. 
- output_nodes = list(set([output.split(":")[0] for output in outputs])) - if "MakeIterator" in [node.op for node in g.as_graph_def().node]: - output_nodes.append("MakeIterator") - - graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, g.as_graph_def(), output_nodes) - - graph = tf.Graph() - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - def strip_equivalent_nodes(graph_def, output_node_names): """Strip nodes with the same input and attr.""" stripped_graph = GraphAnalyzer() @@ -440,8 +416,6 @@ def get_model_input_shape(model): _shape = [item.value for item in _shape] if len(_shape) > 1 and isinstance(_shape[0], int): return _shape[0] - elif isinstance(_shape, list) and hasattr(_shape[0], "value"): - return _shape[0].value return 1 @@ -474,6 +448,40 @@ def int8_node_name_reverse(node): return node_name +def tf_diagnosis_helper(fp32_model, quan_model, tune_cfg, save_path): + """Tensorflow diagnosis helper function.""" + from ...utils.utility import dump_data_to_local + + fp32_node_mapping = {} + qnode_mapping = {} + for node in fp32_model.graph_def.node: + fp32_node_mapping[node.name] = node + for node in quan_model.graph_def.node: + qnode_mapping[node.name] = node + supported_op_lst = set(["Conv2D", "MatMul", "ConcatV2", "MaxPool", "AvgPool", "DepthwiseConv2dNative"]) + fp32_node_lst = set() + for node in fp32_model.graph_def.node: + if node.op in supported_op_lst: + fp32_node_lst.add(node.name) + int8_node_lst = set() + bf16_node_lst = set() + for node in quan_model.graph_def.node: + node_name = node.name + node_name = int8_node_name_reverse(node) + if "Quantized" in node.op: + int8_node_lst.add(node_name) + elif node.attr["value"].tensor.dtype == tf.dtypes.bfloat16.as_datatype_enum: # pragma: no cover + bf16_node_lst.add(node.name) + else: + continue + inspect_node_lst = fp32_node_lst.intersection(bf16_node_lst.union(int8_node_lst)) + activation_min_max, updated_cfg = _parse_config(quan_model.q_config, tune_cfg, inspect_node_lst) + dump_data_to_local(activation_min_max, save_path, "activation_min_max.pkl") + dump_data_to_local(updated_cfg, save_path, "cfg.pkl") + + return inspect_node_lst, updated_cfg + + def _parse_config(q_config, cfg, op_list): """Parse q_config and get dequantize min max value.""" activation_min_max = {} diff --git a/neural_compressor/tensorflow/utils/__init__.py b/neural_compressor/tensorflow/utils/__init__.py index deb15140c92..e92b61adae4 100644 --- a/neural_compressor/tensorflow/utils/__init__.py +++ b/neural_compressor/tensorflow/utils/__init__.py @@ -12,14 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
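The disable_random helper added to quantization/utils/utility.py above is a decorator intended for reproducible graph construction (for example in unit tests): it disables eager execution, resets the default graph, and pins the TF1 random seed before invoking the wrapped function. A hypothetical usage sketch, assuming the patched utility module; the graph built here is only illustrative.

import tensorflow as tf

# Assumed import path after this patch is applied.
from neural_compressor.tensorflow.quantization.utils.utility import disable_random

@disable_random(seed=1)
def build_fake_conv_graph():
    # Runs in TF1 graph mode with a fixed seed, so the randomly initialized
    # weights are deterministic across invocations.
    x = tf.compat.v1.placeholder(tf.float32, shape=(1, 56, 56, 16), name="input")
    w = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
    )
    conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.relu(conv, name="relu")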
+from neural_compressor.tensorflow.utils.model import Model, framework_specific_info from neural_compressor.tensorflow.utils.data import BaseDataLoader, DummyDataset, DummyDatasetV2 -from neural_compressor.tensorflow.utils.model import ( - Model, - TFConfig, -) from neural_compressor.tensorflow.utils.constants import ( SPR_BASE_VERSIONS, - TENSORFLOW_DEFAULT_CONFIG, DEFAULT_SQ_ALPHA_ARGS, UNIFY_OP_TYPE_MAPPING, ) diff --git a/neural_compressor/tensorflow/utils/constants.py b/neural_compressor/tensorflow/utils/constants.py index c6f2f6aba0c..5e500d67426 100644 --- a/neural_compressor/tensorflow/utils/constants.py +++ b/neural_compressor/tensorflow/utils/constants.py @@ -20,17 +20,9 @@ "2.14.0202335", "2.14.dev202335", "2.15.0202341", + "2.16.1", ) -TENSORFLOW_DEFAULT_CONFIG = { - "device": "cpu", - "backend": "default", - "approach": "post_training_static_quant", - "random_seed": 1978, - "format": "default", - "use_bf16": True, -} - DEFAULT_SQ_ALPHA_ARGS = { "alpha_min": 0.0, "alpha_max": 1.0, @@ -44,7 +36,6 @@ "Conv3D": "conv3d", "DepthwiseConv2dNative": "conv2d", "FusedBatchNormV3": "batchnorm", - "FusedBatchNorm": "batchnorm", "_MklFusedInstanceNorm": "instancenorm", "MaxPool": "pooling", "MaxPool3D": "pooling", diff --git a/neural_compressor/tensorflow/utils/data.py b/neural_compressor/tensorflow/utils/data.py index ab50de81fc5..bdf4ce1d9bf 100644 --- a/neural_compressor/tensorflow/utils/data.py +++ b/neural_compressor/tensorflow/utils/data.py @@ -602,4 +602,4 @@ def __iter__(self): def __len__(self): """Return the length of dataset.""" - return sys.maxsize \ No newline at end of file + return sys.maxsize diff --git a/neural_compressor/tensorflow/utils/model.py b/neural_compressor/tensorflow/utils/model.py index 75334446c4c..ddde25586c0 100644 --- a/neural_compressor/tensorflow/utils/model.py +++ b/neural_compressor/tensorflow/utils/model.py @@ -15,32 +15,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy - from neural_compressor.common.utils import DEFAULT_WORKSPACE -from neural_compressor.tensorflow.utils.constants import TENSORFLOW_DEFAULT_CONFIG -from neural_compressor.tensorflow.utils.model_wrappers import BaseModel, KerasModel, TensorflowModel, get_tf_model_type -from neural_compressor.tensorflow.utils.utility import singleton - - -@singleton -class TensorflowGlobalConfig: - global_config = { - "device": "cpu", - "backend": "default", - "approach": "post_training_static_quant", - "random_seed": 1978, - "workspace_path": DEFAULT_WORKSPACE, - "format": "default", - "use_bf16": True, - } - - def reset_global_config(self): - self.global_config = copy.deepcopy(TENSORFLOW_DEFAULT_CONFIG) - self.global_config["workspace_path"] = DEFAULT_WORKSPACE - - -TFConfig = TensorflowGlobalConfig() +from neural_compressor.tensorflow.utils.model_wrappers import ( + BaseModel, + KerasModel, + TensorflowModel, + get_tf_model_type, + TensorflowSubclassedKerasModel, +) + +framework_specific_info = { + "device": "cpu", + "backend": "default", + "approach": "post_training_static_quant", + "random_seed": 1978, + "workspace_path": DEFAULT_WORKSPACE, + "format": "default", +} class Model(object): @@ -59,6 +50,7 @@ def __new__(cls, root, **kwargs): from neural_compressor.tensorflow.utils import itex_installed if isinstance(root, BaseModel): + framework_specific_info["backend"] = "itex" return root if kwargs.get("approach", None) == "quant_aware_training": @@ -71,43 +63,48 @@ def __new__(cls, root, **kwargs): if model_type == "keras" and not itex_installed(): model_type = "saved_model" + # model = TensorflowSubclassedKerasModel(root) + # framework_specific_info["backend"] = "itex" model = TensorflowModel(model_type, root, **kwargs) - conf = kwargs.pop("conf", None) - cls.set_tf_config(conf, model) + conf = kwargs.pop("conf", "NA") + cls.set_framework_info(conf, model) return model @staticmethod - def set_tf_config(conf, model): - config = TFConfig.global_config + def set_framework_info(conf, model): + if conf == "NA": + return framework = "keras" if isinstance(model, KerasModel) else "tensorflow" - if conf and "device" in conf: - config["device"] = conf["device"] - if conf and "approach" in conf: - config["approach"] = conf["approach"] - if conf and "random_seed" in conf: - config["random_seed"] = conf["random_seed"] - if conf and "inputs" in conf: - config["inputs"] = conf["inputs"] - if conf and "outputs" in conf: - config["outputs"] = conf["outputs"] + if conf.device: + framework_specific_info["device"] = conf.device + if conf.approach: + framework_specific_info["approach"] = conf.approach + if conf.random_seed: + framework_specific_info["random_seed"] = conf.random_seed + if conf.inputs: + framework_specific_info["inputs"] = conf.inputs + if conf.outputs: + framework_specific_info["outputs"] = conf.outputs if framework == "keras": - config["backend"] = "itex" + framework_specific_info["backend"] = "itex" return from neural_compressor.tensorflow.utils import itex_installed - if conf and "performance_only" in conf: - config["performance_only"] = conf["performance_only"] + if conf.performance_only: + framework_specific_info["performance_only"] = conf.performance_only if itex_installed(): - config["backend"] = "itex" - if conf and "workspace_path" in conf: - config["workspace_path"] = conf["workspace_path"] - if conf and "recipes" in conf: - config["recipes"] = conf["recipes"] + framework_specific_info["backend"] = "itex" + if conf.workspace_path: + framework_specific_info["workspace_path"] = 
conf.workspace_path + if conf.recipes: + framework_specific_info["recipes"] = conf.recipes + + framework_specific_info["use_bf16"] = conf.use_bf16 if conf.use_bf16 else False for item in ["scale_propagation_max_pooling", "scale_propagation_concat"]: - if "recipes" in config and item not in config["recipes"]: - config["recipes"].update({item: True}) + if framework_specific_info["recipes"] and item not in framework_specific_info["recipes"]: + framework_specific_info["recipes"].update({item: True}) diff --git a/neural_compressor/tensorflow/utils/model_wrappers.py b/neural_compressor/tensorflow/utils/model_wrappers.py index 2628ad1edb8..5629bb140e2 100644 --- a/neural_compressor/tensorflow/utils/model_wrappers.py +++ b/neural_compressor/tensorflow/utils/model_wrappers.py @@ -67,7 +67,7 @@ def get_model_type(model): if isinstance(model, str): model = os.path.abspath(os.path.expanduser(model)) if ( - ((model.endswith(".h5") or model.endswith(".keras")) and os.path.isfile(model)) + (model.endswith(".h5") and os.path.isfile(model)) or is_saved_model_format(os.path.dirname(model)) or (os.path.isdir(model) and is_saved_model_format(model)) ): @@ -325,62 +325,23 @@ def load_saved_model(model, saved_model_tags, input_tensor_names, output_tensor_ return opt, input_tensor_names, output_tensor_names -def _get_graph_from_saved_model_v3(model, input_tensor_names, output_tensor_names): - """The version 3 function that get graph from saved_model. - - Args: - model (string or tf.keras.Model): model path or tf.keras.Model object. - input_tensor_names (list of string): input tensor names of the model. - output_tensor_names (list of string): output tensor names of the model. - Returns: - graph_def (tf.compat.v1.Session): tf.compat.v1.Session object. - inputs (list of string): validated input names. - outputs (list of string): validated output names. - """ - from neural_compressor.adaptor.tf_utils.util import parse_saved_model - - if isinstance(model, tf.keras.Model): - tmp_dir = DEFAULT_WORKSPACE + "/saved_model" - model.save(tmp_dir) - model = tmp_dir - graph_def, _, _, _, input_names, output_names = parse_saved_model( - model, True, input_tensor_names, output_tensor_names - ) - - return graph_def, input_names, output_names - - def _get_graph_from_saved_model_v2(saved_model_dir, input_tensor_names, output_tensor_names): - """The version 2 function that get graph from the original keras model. - - Args: - saved_model_dir (string): model path of a temporary saved_model. - input_tensor_names (list of string): input tensor names of the model. - output_tensor_names (list of string): output tensor names of the model. - - Returns: - graph_def (tf.compat.v1.Session): tf.compat.v1.Session object. - input_names (list of string): validated input names. - output_names (list of string): validated output names. 
- """ from tensorflow.python.saved_model import signature_constants, tag_constants + from neural_compressor.tensorflow.quantization.utils.utility import parse_saved_model + saved_model_exported_names = [signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] saved_model_tags = set([tag_constants.SERVING]) + try: + graph_def, _saved_model, _, _, input_names, output_names = parse_saved_model( + saved_model_dir, True, input_tensor_names, output_tensor_names + ) + except: + return load_saved_model(saved_model_dir, saved_model_tags, input_tensor_names, output_tensor_names) + return graph_def, input_names, output_names - return load_saved_model(saved_model_dir, saved_model_tags, input_tensor_names, output_tensor_names) - - -def _get_graph_from_original_keras_v2(model): - """The version 2 function that get graph from the original keras model. - Args: - model (string or tf.keras.Model): model path or tf.keras.Model object. - Returns: - graph_def (tf.compat.v1.Session): tf.compat.v1.Session object. - input_names (list of string): validated input names. - output_names (list of string): validated output names. - """ +def _get_graph_from_original_keras_v2(model, output_dir): from tensorflow.lite.python.convert import OpsSet from tensorflow.lite.python.util import ( get_grappler_config, @@ -425,16 +386,6 @@ def _get_graph_from_original_keras_v2(model): def _check_keras_format(model, saved_model_dir): - """Decide which method will be used to get graph from the saved_model . - - Args: - model (string or tf.keras.Model): model path or tf.keras.Model object. - saved_model_dir (string): the path to save a temporary saved_model. - Returns: - graph_def (tf.compat.v1.Session): tf.compat.v1.Session object. - inputs (list of string): validated input names. - outputs (list of string): validated output names. - """ from tensorflow.python import saved_model from tensorflow.python.saved_model import save_options from tensorflow.python.saved_model.load import load @@ -455,15 +406,6 @@ def _check_keras_format(model, saved_model_dir): def _get_graph_from_saved_model_v1(model): - """The version 1 function that get graph from saved_model. - - Args: - model (string or tf.keras.Model): model path or tf.keras.Model object. - Returns: - graph_def (tf.compat.v1.Session): tf.compat.v1.Session object. - inputs (list of string): validated input names. - outputs (list of string): validated output names. - """ from tensorflow.lite.python.convert_saved_model import get_inputs_outputs, get_meta_graph_def, get_signature_def from tensorflow.python.client import session from tensorflow.python.framework import ops @@ -504,50 +446,6 @@ def _get_graph_from_saved_model_v1(model): return graph_def, inputs, outputs -def try_loading_keras(model, input_tensor_names, output_tensor_names): - """Try different ways of loading keras models. - - Args: - model (string or tf.keras.Model): model path or tf.keras.Model object. - input_tensor_names (list of string): input tensor names of the model. - output_tensor_names (list of string): output tensor names of the model. - Returns: - graph_def (tf.compat.v1.Session): tf.compat.v1.Session object. - input_names (list of string): validated input names. - output_names (list of string): validated output names. 
- """ - temp_dir = tempfile.mkdtemp() - if not isinstance(model, tf.keras.Model): - model = tf.keras.models.load_model(model) - keras_format = _check_keras_format(model, temp_dir) - - if keras_format == "saved_model_v2": - try: - graph_def, input_names, output_names = _get_graph_from_saved_model_v2( - temp_dir, input_tensor_names, output_tensor_names - ) - if "_FusedBatchNormEx" in [node.op for node in graph_def.node]: - keras_format = "trackable_object" - except: - keras_format = "trackable_object" - - if keras_format == "trackable_object": - try: - graph_def, input_names, output_names = _get_graph_from_original_keras_v2(model) - except: - keras_format = "saved_model_v1" - - if keras_format == "saved_model_v1": # pragma: no cover - try: - tf.keras.backend.set_learning_phase(0) - graph_def, input_names, output_names = _get_graph_from_saved_model_v1(model) - except: - raise ValueError("Not supported keras model type...") - - shutil.rmtree(temp_dir, True) - return graph_def, input_names, output_names - - def keras_session(model, input_tensor_names, output_tensor_names, **kwargs): """Build session with keras model. @@ -561,18 +459,37 @@ def keras_session(model, input_tensor_names, output_tensor_names, **kwargs): input_tensor_names (list of string): validated input_tensor_names. output_tensor_names (list of string): validated output_tensor_names. """ + temp_dir = tempfile.mkdtemp() if tf.version.VERSION > "2.1.0": - try: - graph_def, input_names, output_names = _get_graph_from_saved_model_v3( - model, input_tensor_names, output_tensor_names - ) - except: - graph_def, input_names, output_names = try_loading_keras(model, input_tensor_names, output_tensor_names) + if not isinstance(model, tf.keras.Model): + model = tf.keras.models.load_model(model) + keras_format = _check_keras_format(model, temp_dir) + if keras_format == "saved_model_v2": + try: + graph_def, input_names, output_names = _get_graph_from_saved_model_v2( + temp_dir, input_tensor_names, output_tensor_names + ) + if "_FusedBatchNormEx" in [node.op for node in graph_def.node]: + keras_format = "trackable_object" + except: + keras_format = "trackable_object" + if keras_format == "trackable_object": + try: + graph_def, input_names, output_names = _get_graph_from_original_keras_v2(model, temp_dir) + except: + keras_format = "saved_model_v1" + if keras_format == "saved_model_v1": # pragma: no cover + try: + tf.keras.backend.set_learning_phase(0) + graph_def, input_names, output_names = _get_graph_from_saved_model_v1(model) + except: + raise ValueError("Not supported keras model type...") + # tensorflow 1.x use v1 convert method else: tf.keras.backend.set_learning_phase(0) graph_def, input_names, output_names = _get_graph_from_saved_model_v1(model) - + shutil.rmtree(temp_dir, True) return graph_def_session(graph_def, input_names, output_names, **kwargs) @@ -741,19 +658,12 @@ def saved_model_session(model, input_tensor_names, output_tensor_names, **kwargs output_tensor_names (list of string): validated output_tensor_names. 
""" try: - graph_def, input_names, output_names = _get_graph_from_saved_model_v3( + graph_def, input_names, output_names = _get_graph_from_saved_model_v2( model, input_tensor_names, output_tensor_names ) except: - try: - graph_def, input_names, output_names = _get_graph_from_saved_model_v2( - model, input_tensor_names, output_tensor_names - ) - except: - graph_def, input_names, output_names = _get_graph_from_saved_model_v1(model) - + graph_def, input_names, output_names = _get_graph_from_saved_model_v1(model) assert graph_def is not None, "Can not parse the saved model..." - return graph_def_session(graph_def, input_names, output_names, **kwargs) @@ -1278,6 +1188,7 @@ def graph_def(self): def graph_def(self, graph_def): """Set graph definition.""" self._graph_def = graph_def + self.adjust_weight(self.graph_def) # the attributes of some nodes can't be correctly read if don't import the graph_def tf.import_graph_def(self._graph_def, name="") @@ -1309,7 +1220,6 @@ def sq_weight_scale_dict(self): """Return dict of weight scaler for smooth quantization.""" if not self._sq_weight_scale_dict: self._sq_weight_scale_dict = self.kwargs.get("sq_weight_scale_dict", None) - assert self._weight_name_mapping is not None, "sq_weight_scale_dict should not be None!" return self._sq_weight_scale_dict @sq_weight_scale_dict.setter @@ -1369,20 +1279,31 @@ def adjust_weight(self, graph_def): from neural_compressor.tensorflow.quantization.utils.utility import reconstruct_saved_model + if not self.model_path: + self.model_path = DEFAULT_WORKSPACE + self.model_path = os.path.abspath(os.path.expanduser(self.model_path)) + if os.path.exists(self.model_path): + import shutil + shutil.rmtree(self.model_path) + os.makedirs(self.model_path, exist_ok=True) + reconstruct_saved_model(graph_def, self.func, self.frozen_func, self._saved_model, self.model_path) model = load.load(self.model_path, [tag_constants.SERVING]) + if not self._sq_weight_scale_dict: + self._auto_trackable = model + return + for idx, weight_tensor in enumerate(model.variables): parsed_weight_name = self.weight_name_mapping(weight_tensor.name) if parsed_weight_name in self.sq_weight_scale_dict: - if len(weight_tensor.shape) == 4: - shape_parm = [0, 1, 3, 2] - elif len(weight_tensor.shape) == 2: - shape_parm = [1, 0] - weight_array = np.transpose(weight_tensor, shape_parm) - weight_array *= self.sq_weight_scale_dict[parsed_weight_name] - weight_array = np.transpose(weight_array, shape_parm) - tf.compat.v1.assign(model.variables[idx], weight_array) + try: + weight_array = np.transpose(weight_tensor, [1, 0]) + weight_array *= self.sq_weight_scale_dict[parsed_weight_name] + weight_array = np.transpose(weight_array, [1, 0]) + tf.compat.v1.assign(model.variables[idx], weight_array) + except: + breakpoint() else: weight_array = weight_tensor @@ -1403,14 +1324,72 @@ def save(self, root=None): shutil.rmtree(root) os.makedirs(root, exist_ok=True) - self.adjust_weight(self._graph_def) - graph_def, _saved_model, func, frozen_func, _, _ = parse_saved_model(self._auto_trackable) + if self.sq_weight_scale_dict: + self.adjust_weight(self._graph_def) + graph_def, _saved_model, func, frozen_func, _, _ = parse_saved_model(self.model) reconstruct_saved_model(graph_def, func, frozen_func, _saved_model, root) logger.info("Save quantized model to {}.".format(root)) # delete the LLM file saved in this temporary path shutil.rmtree(self.model_path, ignore_errors=True) +class TensorflowSubclassedKerasModel(TensorflowSavedModelModel): + """Build a subclassed Keras model.""" + + def 
__init__(self, model="", **kwargs): + """Initialize a subclassed Keras model. + + Args: + model (string or tf.keras.Model object): model path or model object. + """ + super(TensorflowSubclassedKerasModel, self).__init__(model) + self.model_type = "saved_model" + self._keras_model = None + + def _build_as_functional_model(self, model_path): + breakpoint() + TFSMlayer = tf.keras.layers.TFSMLayer(model_path, call_endpoint="serving_default") + inputs = tf.keras.Input(shape=(3, 224, 224)) + outputs = TFSMlayer(inputs) + return tf.keras.Model(inputs, outputs) + + @property + def model(self): + """Return model in Keras Functional object.""" + if self._keras_model: + return self._keras_model + + + root = DEFAULT_WORKSPACE + "/saved_model" + root = os.path.abspath(os.path.expanduser(root)) + if os.path.exists(root): + shutil.rmtree(root) + os.makedirs(root, exist_ok=True) + if not self._sess: + self._load_sess(self._model, **self.kwargs) + _, builder = self.build_saved_model(root) + builder.save() + self._keras_model = self._build_as_functional_model(root) + shutil.rmtree(root) + + return self._keras_model + + @model.setter + def model(self, q_model): + """Set model itself.""" + self._keras_model = q_model + + def save(self, root=None): + """Save Tensorflow QAT model.""" + if not root: + root = DEFAULT_WORKSPACE + "/keras_model.keras" + root = os.path.abspath(os.path.expanduser(root)) + os.makedirs(os.path.dirname(root), exist_ok=True) + + self.model.save(root) + return root + + class TensorflowQATModel(TensorflowSavedModelModel): """Build Tensorflow QAT model.""" @@ -1642,24 +1621,12 @@ def report_sparsity(self): @property def input_node_names(self): """Return input node names.""" - names = ( - self.model.input_names - if version1_lt_version2(tf.version.VERSION, "2.16.1") - else [tensor.name for tensor in self.model.inputs] - ) - - return names + return self.model.input_names @property def output_node_names(self): """Return output node names.""" - names = ( - self.model.output_names - if version1_lt_version2(tf.version.VERSION, "2.16.1") - else [tensor.name for tensor in self.model.outputs] - ) - - return names + return self.model.output_names TENSORFLOW_MODELS = { diff --git a/neural_compressor/tensorflow/utils/utility.py b/neural_compressor/tensorflow/utils/utility.py index ed1fc88aee8..4ce8cc6faaa 100644 --- a/neural_compressor/tensorflow/utils/utility.py +++ b/neural_compressor/tensorflow/utils/utility.py @@ -97,7 +97,7 @@ def deep_get(dictionary, keys, default=None): def itex_installed(): """Check if the IntelĀ® Extension for TensorFlow has been installed.""" try: - import intel_extension_for_tensorflow + # import intel_extension_for_tensorflow return True except: From 9dd4beb98b64009e1a71c1e2320fadcb19f4cf7b Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Mon, 22 Apr 2024 13:40:02 +0800 Subject: [PATCH 07/25] add hf resnet50 example Signed-off-by: zehao-intel --- .../image_recognition/hf_resnet50/main.py | 35 +++++++++++++++++++ .../algorithms/static_quant/tensorflow.yaml | 2 +- .../tensorflow/utils/constants.py | 1 + 3 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 examples/keras/image_recognition/hf_resnet50/main.py diff --git a/examples/keras/image_recognition/hf_resnet50/main.py b/examples/keras/image_recognition/hf_resnet50/main.py new file mode 100644 index 00000000000..14cbb6dcad7 --- /dev/null +++ b/examples/keras/image_recognition/hf_resnet50/main.py @@ -0,0 +1,35 @@ +from neural_compressor.tensorflow.utils import BaseDataLoader +import tensorflow as tf +from 
transformers import AutoImageProcessor +from datasets import load_dataset + +dataset = load_dataset("huggingface/cats-image") +image = dataset["test"]["image"][0] + +image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") +input_data = image_processor(image, return_tensors="tf") + +class Dataset(object): + def __init__(self, batch_size=100): + self.length = 100 + self.batch_size = 1 + self.data = [input_data['pixel_values'].numpy()]*100 + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + return self.data[idx][0], None + + +calib_dataloader = BaseDataLoader(dataset=Dataset()) + +from neural_compressor.tensorflow import StaticQuantConfig, quantize_model +from neural_compressor.tensorflow.utils.model_wrappers import TensorflowSavedModelModel + +quant_config = StaticQuantConfig() +model = TensorflowSavedModelModel("resnet50-saved-model/saved_model/1") +model.model_type="saved_model" +q_model = quantize_model(model, quant_config, calib_dataloader) + +q_model.save("resnet50_uniform_qdq") diff --git a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml index 9e9d7e5952c..45a0f526faf 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml +++ b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml @@ -16,7 +16,7 @@ --- - version: - name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341', '2.16.1'] + name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341', '2.16.1', '2.17.0'] bf16: ["_MklLayerNorm", "Conv2D", "Conv2DBackpropFilter", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropFilterV2", "Conv3DBackpropInputV2", "DepthwiseConv2dNative", "DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput", "GRUBlockCell", diff --git a/neural_compressor/tensorflow/utils/constants.py b/neural_compressor/tensorflow/utils/constants.py index 5e500d67426..5a29a0228e2 100644 --- a/neural_compressor/tensorflow/utils/constants.py +++ b/neural_compressor/tensorflow/utils/constants.py @@ -21,6 +21,7 @@ "2.14.dev202335", "2.15.0202341", "2.16.1", + "2.17.0", ) DEFAULT_SQ_ALPHA_ARGS = { From 2a6d162724b387ab0620e6d9308d27ff4fa0b54e Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Mon, 22 Apr 2024 14:28:55 +0800 Subject: [PATCH 08/25] fix uint8 max Signed-off-by: zehao-intel --- .../keras/image_recognition/hf_bert/main.py | 945 ++++++++++++++++++ .../int8/convert_qdq_to_uniform_qdq.py | 4 +- 2 files changed, 947 insertions(+), 2 deletions(-) create mode 100644 examples/keras/image_recognition/hf_bert/main.py diff --git a/examples/keras/image_recognition/hf_bert/main.py b/examples/keras/image_recognition/hf_bert/main.py new file mode 100644 index 00000000000..6eabec7e867 --- /dev/null +++ b/examples/keras/image_recognition/hf_bert/main.py @@ -0,0 +1,945 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2020 The HuggingFace Team All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for question answering. +""" +# You can also adapt this script on your own question answering task. Pointers for this are left as comments. + +import json +import logging +import os +import sys +import time +import warnings +from dataclasses import dataclass, field +from pathlib import Path +from statistics import mean +from typing import Optional + +import evaluate +import tensorflow as tf +from datasets import load_dataset +from packaging.version import parse +from utils_qa import postprocess_qa_predictions + +import transformers +from transformers import ( + AutoConfig, + AutoTokenizer, + EvalPrediction, + HfArgumentParser, + PreTrainedTokenizerFast, + PushToHubCallback, + TFAutoModelForQuestionAnswering, + TFTrainingArguments, + create_optimizer, + set_seed, +) +from transformers.utils import CONFIG_NAME, TF2_WEIGHTS_NAME, check_min_version, send_example_telemetry + + +try: + import tf_keras as keras +except (ModuleNotFoundError, ImportError): + import keras + + if parse(keras.__version__).major > 2: + raise ValueError( + "Your currently installed version of Keras is Keras 3, but this is not yet supported in " + "Transformers. Please install the backwards-compatible tf-keras package with " + "`pip install tf-keras`." + ) + + +# Will error if the minimal version of Transformers is not installed. Remove at your own risks. +check_min_version("4.38.0.dev0") + +logger = logging.getLogger(__name__) + + +# region Arguments +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. + """ + + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} + ) + # TODO: Add support for profile + mode: Optional[str] = field( + default="benchmark", metadata={"help": "One of two options: benchmark/accuracy."} + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"}, + ) + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + ) + token: str = field( + default=None, + metadata={ + "help": ( + "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " + "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." + ) + }, + ) + use_auth_token: bool = field( + default=None, + metadata={ + "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." + }, + ) + trust_remote_code: bool = field( + default=False, + metadata={ + "help": ( + "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " + "should only be set to `True` for repositories you trust and in which you have read the code, as it will " + "execute code present on the Hub on your local machine." 
+ ) + }, + ) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. + """ + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + precision: Optional[str] = field( + default="fp32", metadata={"help": "The precision used to run the model. Can be fp32/bf16."} + ) + train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) + validation_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, + ) + test_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, + ) + batch_size: Optional[int] = field( + default=128, + metadata={"help": "Specify the batch size. If this parameter is not specified, use the default batch size."}, + ) + # train_eval_warmup_steps is added to override 'warmup_steps' option in src/transformers/training_args.py + train_eval_warmup_steps: Optional[int] = field( + default=10, + metadata={"help": "Number of warmup steps for training and eval."}, + ) + steps: Optional[int] = field( + default=30, + metadata={"help": "Number of steps for training and eval."}, + ) + num_inter_threads: Optional[int] = field( + default=0, + metadata={"help": "Number of inter-op parallelism threads to use for training and eval."}, + ) + num_intra_threads: Optional[int] = field( + default=0, + metadata={"help": "Number of intra-op parallelism threads to use for training and eval."}, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + max_seq_length: int = field( + default=384, + metadata={ + "help": ( + "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + ) + }, + ) + pad_to_max_length: bool = field( + default=True, + metadata={ + "help": ( + "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when" + " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)." + ) + }, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ) + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + ) + }, + ) + max_predict_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of prediction examples to this " + "value if set." 
+ ) + }, + ) + version_2_with_negative: bool = field( + default=False, metadata={"help": "If true, some of the examples do not have an answer."} + ) + null_score_diff_threshold: float = field( + default=0.0, + metadata={ + "help": ( + "The threshold used to select the null answer: if the best answer has a score that is less than " + "the score of the null answer minus this threshold, the null answer is selected for this example. " + "Only useful when `version_2_with_negative=True`." + ) + }, + ) + doc_stride: int = field( + default=128, + metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, + ) + n_best_size: int = field( + default=20, + metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, + ) + max_answer_length: int = field( + default=30, + metadata={ + "help": ( + "The maximum length of an answer that can be generated. This is needed because the start " + "and end predictions are not conditioned on one another." + ) + }, + ) + + def __post_init__(self): + if ( + self.dataset_name is None + and self.train_file is None + and self.validation_file is None + and self.test_file is None + ): + raise ValueError("Need either a dataset name or a training/validation file/test_file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." + if self.test_file is not None: + extension = self.test_file.split(".")[-1] + assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." + + +# endregion + + +# region Helper classes +class SavePretrainedCallback(keras.callbacks.Callback): + # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary + # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback + # that saves the model with this method after each epoch. 
+ def __init__(self, output_dir, **kwargs): + super().__init__() + self.output_dir = output_dir + + def on_epoch_end(self, epoch, logs=None): + saved_model_dir = self.output_dir + '_epoch_' + str(epoch) + self.model.save_pretrained(saved_model_dir, saved_model=True) + + +# endregion + +class TimingCallback(keras.callbacks.Callback): + def __init__(self, batch_size, warmup_steps, steps): + self.total_time = 0 + self.batch_size = batch_size + self.num_processed_examples = 0 + self.warmup_steps = warmup_steps + self.steps = steps + + def on_predict_batch_begin(self, iteration, logs={}): + if iteration == self.warmup_steps: + # Start timer once warmup steps are done + self.start_time = time.time() + # Display start/stop info only if ONEDNN_VERBOSE is set + if os.getenv("ONEDNN_VERBOSE") and iteration >= self.warmup_steps: + logger.info('\n---> Start iteration {0}'.format(str(iteration - self.warmup_steps))) + + def on_predict_batch_end(self, iteration, logs={}): + self.num_processed_examples += self.batch_size + if os.getenv("ONEDNN_VERBOSE") and iteration >= self.warmup_steps: + logger.info('\n---> Stop iteration {0}'.format(str(iteration - self.warmup_steps))) + if iteration == self.steps - 1: + # Stop timer after the last step + self.total_time = time.time() - self.start_time + + +def main(): + # region Argument parsing + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + tf.config.threading.set_inter_op_parallelism_threads(data_args.num_inter_threads) + tf.config.threading.set_intra_op_parallelism_threads(data_args.num_intra_threads) + + print("\n********** Using model_name_or_path from " + model_args.model_name_or_path + " **********\n") + + if data_args.precision == "bfloat16": + #keras.mixed_precision.set_global_policy('mixed_bfloat16') + tf.config.optimizer.set_experimental_options({'auto_mixed_precision_onednn_bfloat16': True}) + print(tf.config.optimizer.get_experimental_options()) + + if model_args.use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", + FutureWarning, + ) + if model_args.token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + model_args.token = model_args.use_auth_token + + # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The + # information sent is the one passed as arguments along with your Python/PyTorch versions. 
+ send_example_telemetry("run_qa", model_args, data_args, framework="tensorflow") + + output_dir = Path(training_args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + # endregion + + # region Checkpoints + checkpoint = None + if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir: + if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file(): + checkpoint = output_dir + logger.info( + f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this" + " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + #else: + # raise ValueError( + # f"Output directory ({training_args.output_dir}) already exists and is not empty. " + # "Use --overwrite_output_dir to continue regardless." + # ) + # endregion + + # region Logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + logger.setLevel(logging.INFO) + + # Set the verbosity to info of the Transformers logger (on main process only): + if training_args.should_log: + transformers.utils.logging.set_verbosity_info() + transformers.utils.logging.enable_default_handler() + transformers.utils.logging.enable_explicit_format() + #logger.info(f"Training/evaluation parameters {training_args}") + # endregion + + # Set seed before initializing model. + set_seed(training_args.seed) + + logger.info("Running " + model_args.mode + " for batch size " + str(data_args.batch_size)) + + training_args.do_eval = True + + # region Load Data + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called + # 'text' is found. You can easily tweak this behavior (see below). + # + # In distributed training, the load_dataset function guarantee that only one local process can concurrently + # download the dataset. + if data_args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + datasets = load_dataset( + data_args.dataset_name, + data_args.dataset_config_name, + cache_dir=model_args.cache_dir, + token=model_args.token, + ) + else: + data_files = {} + if data_args.train_file is not None: + data_files["train"] = data_args.train_file + extension = data_args.train_file.split(".")[-1] + + if data_args.validation_file is not None: + data_files["validation"] = data_args.validation_file + extension = data_args.validation_file.split(".")[-1] + if data_args.test_file is not None: + data_files["test"] = data_args.test_file + extension = data_args.test_file.split(".")[-1] + datasets = load_dataset( + extension, + data_files=data_files, + field="data", + cache_dir=model_args.cache_dir, + token=model_args.token, + ) + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets. + # endregion + + # region Load pretrained model and tokenizer + # + # Distributed training: + # The .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab. 
+ config = AutoConfig.from_pretrained( + model_args.config_name if model_args.config_name else model_args.model_name_or_path, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + token=model_args.token, + trust_remote_code=model_args.trust_remote_code, + ) + tokenizer = AutoTokenizer.from_pretrained( + model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, + cache_dir=model_args.cache_dir, + use_fast=True, + revision=model_args.model_revision, + token=model_args.token, + trust_remote_code=model_args.trust_remote_code, + ) + # endregion + + # region Tokenizer check: this script requires a fast tokenizer. + if not isinstance(tokenizer, PreTrainedTokenizerFast): + raise ValueError( + "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" + " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" + " this requirement" + ) + # endregion + + # region Preprocessing the datasets + # Preprocessing is slightly different for training and evaluation. + if training_args.do_train: + column_names = datasets["train"].column_names + elif training_args.do_eval: + column_names = datasets["validation"].column_names + else: + column_names = datasets["test"].column_names + question_column_name = "question" if "question" in column_names else column_names[0] + context_column_name = "context" if "context" in column_names else column_names[1] + answer_column_name = "answers" if "answers" in column_names else column_names[2] + + # Padding side determines if we do (question|context) or (context|question). + pad_on_right = tokenizer.padding_side == "right" + + if data_args.max_seq_length > tokenizer.model_max_length: + logger.warning( + f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " + f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." + ) + max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) + + if data_args.pad_to_max_length or isinstance(training_args.strategy, tf.distribute.TPUStrategy): + logger.info("Padding all batches to max length because argument was set or we're on TPU.") + padding = "max_length" + else: + padding = False + + # Training preprocessing + def prepare_train_features(examples): + # Some of the questions have lots of whitespace on the left, which is not useful and will make the + # truncation of the context fail (the tokenized question will take a lots of space). So we remove that + # left whitespace + examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] + + # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results + # in one example possible giving several features when a context is long, each of those features having a + # context that overlaps a bit the context of the previous feature. + tokenized_examples = tokenizer( + examples[question_column_name if pad_on_right else context_column_name], + examples[context_column_name if pad_on_right else question_column_name], + truncation="only_second" if pad_on_right else "only_first", + max_length=max_seq_length, + stride=data_args.doc_stride, + return_overflowing_tokens=True, + return_offsets_mapping=True, + padding=padding, + ) + + # Since one example might give us several features if it has a long context, we need a map from a feature to + # its corresponding example. 
This key gives us just that. + sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") + # The offset mappings will give us a map from token to character position in the original context. This will + # help us compute the start_positions and end_positions. + offset_mapping = tokenized_examples.pop("offset_mapping") + + # Let's label those examples! + tokenized_examples["start_positions"] = [] + tokenized_examples["end_positions"] = [] + + for i, offsets in enumerate(offset_mapping): + # We will label impossible answers with the index of the CLS token. + input_ids = tokenized_examples["input_ids"][i] + cls_index = input_ids.index(tokenizer.cls_token_id) + + # Grab the sequence corresponding to that example (to know what is the context and what is the question). + sequence_ids = tokenized_examples.sequence_ids(i) + + # One example can give several spans, this is the index of the example containing this span of text. + sample_index = sample_mapping[i] + answers = examples[answer_column_name][sample_index] + # If no answers are given, set the cls_index as answer. + if len(answers["answer_start"]) == 0: + tokenized_examples["start_positions"].append(cls_index) + tokenized_examples["end_positions"].append(cls_index) + else: + # Start/end character index of the answer in the text. + start_char = answers["answer_start"][0] + end_char = start_char + len(answers["text"][0]) + + # Start token index of the current span in the text. + token_start_index = 0 + while sequence_ids[token_start_index] != (1 if pad_on_right else 0): + token_start_index += 1 + + # End token index of the current span in the text. + token_end_index = len(input_ids) - 1 + while sequence_ids[token_end_index] != (1 if pad_on_right else 0): + token_end_index -= 1 + + # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). + if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): + tokenized_examples["start_positions"].append(cls_index) + tokenized_examples["end_positions"].append(cls_index) + else: + # Otherwise move the token_start_index and token_end_index to the two ends of the answer. + # Note: we could go after the last offset if the answer is the last word (edge case). 
+ while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: + token_start_index += 1 + tokenized_examples["start_positions"].append(token_start_index - 1) + while offsets[token_end_index][1] >= end_char: + token_end_index -= 1 + tokenized_examples["end_positions"].append(token_end_index + 1) + + return tokenized_examples + + processed_datasets = {} + if training_args.do_train: + if "train" not in datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = datasets["train"] + if data_args.max_train_samples is not None: + # We will select sample from whole data if argument is specified + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + # Create train feature from dataset + train_dataset = train_dataset.map( + prepare_train_features, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + ) + if data_args.max_train_samples is not None: + # Number of samples might increase during Feature Creation, We select only specified max samples + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + processed_datasets["train"] = train_dataset + + # Validation preprocessing + def prepare_validation_features(examples): + # Some of the questions have lots of whitespace on the left, which is not useful and will make the + # truncation of the context fail (the tokenized question will take a lots of space). So we remove that + # left whitespace + examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] + + # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results + # in one example possible giving several features when a context is long, each of those features having a + # context that overlaps a bit the context of the previous feature. + tokenized_examples = tokenizer( + examples[question_column_name if pad_on_right else context_column_name], + examples[context_column_name if pad_on_right else question_column_name], + truncation="only_second" if pad_on_right else "only_first", + max_length=max_seq_length, + stride=data_args.doc_stride, + return_overflowing_tokens=True, + return_offsets_mapping=True, + padding=padding, + ) + + # Since one example might give us several features if it has a long context, we need a map from a feature to + # its corresponding example. This key gives us just that. + sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") + + # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the + # corresponding example_id and we will store the offset mappings. + tokenized_examples["example_id"] = [] + + for i in range(len(tokenized_examples["input_ids"])): + # Grab the sequence corresponding to that example (to know what is the context and what is the question). + sequence_ids = tokenized_examples.sequence_ids(i) + context_index = 1 if pad_on_right else 0 + + # One example can give several spans, this is the index of the example containing this span of text. + sample_index = sample_mapping[i] + tokenized_examples["example_id"].append(examples["id"][sample_index]) + + # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token + # position is part of the context or not. 
+ tokenized_examples["offset_mapping"][i] = [ + (o if sequence_ids[k] == context_index else None) + for k, o in enumerate(tokenized_examples["offset_mapping"][i]) + ] + + return tokenized_examples + + if training_args.do_eval: + if "validation" not in datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_examples = datasets["validation"] + if data_args.max_eval_samples is not None: + # We will select sample from whole data + max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) + eval_examples = eval_examples.select(range(max_eval_samples)) + # Validation Feature Creation + eval_dataset = eval_examples.map( + prepare_validation_features, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + ) + if data_args.max_eval_samples is not None: + # During Feature creation dataset samples might increase, we will select required samples again + max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) + eval_dataset = eval_dataset.select(range(max_eval_samples)) + processed_datasets["validation"] = eval_dataset + + if training_args.do_predict: + if "test" not in datasets: + raise ValueError("--do_predict requires a test dataset") + predict_examples = datasets["test"] + if data_args.max_predict_samples is not None: + # We will select sample from whole data + predict_examples = predict_examples.select(range(data_args.max_predict_samples)) + # Predict Feature Creation + predict_dataset = predict_examples.map( + prepare_validation_features, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + ) + if data_args.max_predict_samples is not None: + # During Feature creation dataset samples might increase, we will select required samples again + max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) + predict_dataset = predict_dataset.select(range(max_predict_samples)) + processed_datasets["test"] = predict_dataset + # endregion + + # region Metrics and Post-processing: + def post_processing_function(examples, features, predictions, stage="eval"): + # Post-processing: we match the start logits and end logits to answers in the original context. + predictions = postprocess_qa_predictions( + examples=examples, + features=features, + predictions=predictions, + version_2_with_negative=data_args.version_2_with_negative, + n_best_size=data_args.n_best_size, + max_answer_length=data_args.max_answer_length, + null_score_diff_threshold=data_args.null_score_diff_threshold, + output_dir=training_args.output_dir, + prefix=stage, + ) + # Format the result to the format the metric expects. 
+ if data_args.version_2_with_negative: + formatted_predictions = [ + {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() + ] + else: + formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] + + references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] + return EvalPrediction(predictions=formatted_predictions, label_ids=references) + + metric = evaluate.load( + "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir + ) + + def compute_metrics(p: EvalPrediction): + return metric.compute(predictions=p.predictions, references=p.label_ids) + + # endregion + + with training_args.strategy.scope(): + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + num_replicas = training_args.strategy.num_replicas_in_sync + + # region Load model and prepare datasets + if checkpoint is None: + model_path = model_args.model_name_or_path + else: + model_path = checkpoint + model = TFAutoModelForQuestionAnswering.from_pretrained( + model_path, + config=config, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + token=model_args.token, + trust_remote_code=model_args.trust_remote_code, + ) + if training_args.do_train: + training_dataset = model.prepare_tf_dataset( + processed_datasets["train"], + shuffle=True, + batch_size=training_args.per_device_train_batch_size * num_replicas, + tokenizer=tokenizer, + ) + + training_dataset = training_dataset.with_options(dataset_options) + + num_train_steps = len(training_dataset) * training_args.num_train_epochs + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + + optimizer, schedule = create_optimizer( + init_lr=training_args.learning_rate, + num_train_steps=len(training_dataset) * training_args.num_train_epochs, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, + ) + + # Transformers models compute the right loss for their task by default when labels are passed, and will + # use this for training unless you specify your own loss function in compile(). + model.compile(optimizer=optimizer, jit_compile=training_args.xla, metrics=["accuracy"]) + + else: + # Convert trainable kernels to numpy arrays so that XLA can treat + # them as constants for inference optimization. 
+ for submodule in model.submodules: + if hasattr(submodule, 'kernel'): + submodule.kernel = submodule.kernel.numpy() + model.compile(jit_compile=training_args.xla) + training_dataset = None + + if training_args.do_eval: + eval_dataset = model.prepare_tf_dataset( + processed_datasets["validation"], + shuffle=False, + batch_size=training_args.per_device_train_batch_size * num_replicas, + tokenizer=tokenizer, + ) + eval_dataset = eval_dataset.with_options(dataset_options) + else: + eval_dataset = None + + if training_args.do_predict: + predict_dataset = model.prepare_tf_dataset( + processed_datasets["test"], + shuffle=False, + batch_size=training_args.per_device_eval_batch_size * num_replicas, + tokenizer=tokenizer, + ) + predict_dataset = predict_dataset.with_options(dataset_options) + else: + predict_dataset = None + + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + if data_args.dataset_name is not None: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" + else: + push_to_hub_model_id = f"{model_name}-finetuned-question-answering" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + hub_model_id=push_to_hub_model_id, + hub_token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] + # endregion + + # region Training and Evaluation + + if training_args.do_train: + # Note that the validation and test datasets have been processed in a different way to the + # training datasets in this example, and so they don't have the same label structure. + # As such, we don't pass them directly to Keras, but instead get model predictions to evaluate + # after training. + callbacks = SavePretrainedCallback(training_args.output_dir) + model.fit(training_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) + + if training_args.do_eval: + logger.info("*** Evaluation ***") + + # In this example, we compute advanced metrics at the end of training, but + # if you'd like to compute metrics every epoch that are too complex to be written as + # standard Keras metrics, you can use our KerasMetricCallback. 
See + # https://huggingface.co/docs/transformers/main/en/main_classes/keras_callbacks + if model_args.mode == "benchmark": + timing_callback = TimingCallback(data_args.batch_size, data_args.train_eval_warmup_steps, data_args.steps) + # Compute total_steps since model.predict() does not accept warmup_steps as an argument + total_steps = data_args.train_eval_warmup_steps + data_args.steps + + eval_predictions = model.predict(eval_dataset, batch_size=data_args.batch_size, + callbacks=[timing_callback], steps=total_steps) + + assert timing_callback.total_time > 0 + + # Compute throughput by excluding num_warmup_examples + num_warmup_examples = data_args.train_eval_warmup_steps * data_args.batch_size + num_benchmark_examples = timing_callback.num_processed_examples - num_warmup_examples + eval_throughput = num_benchmark_examples / timing_callback.total_time + + logger.info("Batch size: %d" % data_args.batch_size) + logger.info("Total examples: %d, Warmup examples: %d" % (timing_callback.num_processed_examples, num_warmup_examples)) + logger.info("Benchmark examples: %d, Benchmark time: %3.2f secs" % (num_benchmark_examples, timing_callback.total_time)) + logger.info("Throughput (examples/sec): %3.2f" % eval_throughput) + + if data_args.batch_size == 1: + eval_latency = (data_args.batch_size / eval_throughput) * 1000 + logger.info("Latency: %.2f ms" % (eval_latency)) + + elif model_args.mode == "accuracy": + eval_predictions = model.predict(eval_dataset, batch_size=data_args.batch_size) + logger.info("Computing evaluation metrics...") + if isinstance(eval_predictions.start_logits, tf.RaggedTensor): + # If predictions are RaggedTensor, we densify them. Since they are logits, padding with 0 is a bad idea! + # The reason is that a logit of 0 can often end up as quite a high probability value, sometimes even + # the highest probability in a sample. Instead, we use a large negative value, which ensures that the + # padding positions are correctly masked. + eval_start_logits = eval_predictions.start_logits.to_tensor(default_value=-1000).numpy() + eval_end_logits = eval_predictions.end_logits.to_tensor(default_value=-1000).numpy() + else: + eval_start_logits = eval_predictions.start_logits + eval_end_logits = eval_predictions.end_logits + + post_processed_eval = post_processing_function( + datasets["validation"], + processed_datasets["validation"], + (eval_start_logits, eval_end_logits), + ) + metrics = compute_metrics(post_processed_eval) + logger.info("Evaluation metrics:") + for metric, value in metrics.items(): + logger.info(f"{metric}: {value:.3f}") + if training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(metrics)) + # endregion + + # region Prediction + if training_args.do_predict: + logger.info("*** Predict ***") + + test_predictions = model.predict(predict_dataset) + if isinstance(test_predictions.start_logits, tf.RaggedTensor): + # If predictions are RaggedTensor, we densify them. Since they are logits, padding with 0 is a bad idea! + # The reason is that a logit of 0 can often end up as quite a high probability value, sometimes even + # the highest probability in a sample. Instead, we use a large negative value, which ensures that the + # padding positions are correctly masked. 
+ test_start_logits = test_predictions.start_logits.to_tensor(default_value=-1000).numpy() + test_end_logits = test_predictions.end_logits.to_tensor(default_value=-1000).numpy() + else: + test_start_logits = test_predictions.start_logits + test_end_logits = test_predictions.end_logits + post_processed_test = post_processing_function( + datasets["test"], + processed_datasets["test"], + (test_start_logits, test_end_logits), + ) + metrics = compute_metrics(post_processed_test) + + logging.info("Test metrics:") + for metric, value in metrics.items(): + logging.info(f"{metric}: {value:.3f}") + # endregion + + #if training_args.output_dir is not None and not training_args.push_to_hub: + # # If we're not pushing to hub, at least save a local copy when we're done + # model.save_pretrained(training_args.output_dir, saved_model=True) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 161f0082ec9..87e67867d25 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -52,8 +52,8 @@ def _calculate_zp_and_scale(self, min_value, max_value, dtype): if dtype == attr_value_pb2.AttrValue(type=self.int8_type): zp = 0 scale_range = 127 - self.quantization_min_val = -127 - self.quantization_max_val = 128 + self.quantization_min_val = -128 + self.quantization_max_val = 127 elif dtype == attr_value_pb2.AttrValue(type=self.uint8_type): zp = 128 scale_range = 255 From 48013dcf39f994f46aac411288667e3d31da1e8a Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Mon, 22 Apr 2024 16:36:46 +0800 Subject: [PATCH 09/25] fix quint range for sequential or functional keras model Signed-off-by: zehao-intel --- neural_compressor/adaptor/keras_utils/conv2d.py | 8 ++++---- neural_compressor/adaptor/keras_utils/dense.py | 8 ++++---- neural_compressor/adaptor/keras_utils/quantizer.py | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/neural_compressor/adaptor/keras_utils/conv2d.py b/neural_compressor/adaptor/keras_utils/conv2d.py index c9a87c3058c..410c73e9996 100644 --- a/neural_compressor/adaptor/keras_utils/conv2d.py +++ b/neural_compressor/adaptor/keras_utils/conv2d.py @@ -162,8 +162,8 @@ def call(self, inputs): scales=self.scales, zero_points=self.zero_points, Tout=tf.qint8, - quantization_min_val=-127, - quantization_max_val=128, + quantization_min_val=-128, + quantization_max_val=127, quantization_axis=3,) kernel = tf.raw_ops.UniformDequantize( @@ -171,8 +171,8 @@ def call(self, inputs): scales=self.scales, zero_points=self.zero_points, Tout=tf.float32, - quantization_min_val=-127, - quantization_max_val=128, + quantization_min_val=-128, + quantization_max_val=127, quantization_axis=3,) outputs = tf.keras.backend.conv2d( diff --git a/neural_compressor/adaptor/keras_utils/dense.py b/neural_compressor/adaptor/keras_utils/dense.py index 8e2be478c6f..b292df9941c 100644 --- a/neural_compressor/adaptor/keras_utils/dense.py +++ b/neural_compressor/adaptor/keras_utils/dense.py @@ -63,8 +63,8 @@ def call(self, inputs): scales=self.scales, zero_points=self.zero_points, Tout=tf.qint8, - quantization_min_val=-127, - quantization_max_val=128, + quantization_min_val=-128, + quantization_max_val=127, quantization_axis=1,) kernel 
= tf.raw_ops.UniformDequantize( @@ -72,8 +72,8 @@ def call(self, inputs): scales=self.scales, zero_points=self.zero_points, Tout=tf.float32, - quantization_min_val=-127, - quantization_max_val=128, + quantization_min_val=-128, + quantization_max_val=127, quantization_axis=1,) outputs = tf.keras.backend.dot(inputs, kernel) diff --git a/neural_compressor/adaptor/keras_utils/quantizer.py b/neural_compressor/adaptor/keras_utils/quantizer.py index f2137d228d5..87d363d2857 100644 --- a/neural_compressor/adaptor/keras_utils/quantizer.py +++ b/neural_compressor/adaptor/keras_utils/quantizer.py @@ -152,8 +152,8 @@ def __init__( self.zero_points = int(zero_points) self.T = T_map[T] self.quantization_axis = quantization_axis - self.quantization_min_val = -127 if T=="s8" else 0 - self.quantization_max_val = 128 if T=="s8" else 255 + self.quantization_min_val = -128 if T=="s8" else 0 + self.quantization_max_val = 127 if T=="s8" else 255 def call(self, inputs): outputs = tf.raw_ops.UniformQuantize( @@ -193,8 +193,8 @@ def __init__(self, self.zero_points = int(zero_points) self.T = T_map[T] self.quantization_axis = quantization_axis - self.quantization_min_val = -127 if T=="s8" else 0 - self.quantization_max_val = 128 if T=="s8" else 255 + self.quantization_min_val = -128 if T=="s8" else 0 + self.quantization_max_val = 127 if T=="s8" else 255 def call(self, inputs): return tf.raw_ops.UniformDequantize( From 1959471eb28fd68d1b18c0b06be52dda88001b4d Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Mon, 22 Apr 2024 22:59:35 +0800 Subject: [PATCH 10/25] modify bert example Signed-off-by: zehao-intel --- .../quantization/ptq/tune_squad.py | 17 +++++++---------- neural_compressor/tensorflow/utils/model.py | 10 ++++++---- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py index d64aa3609f6..b485443de4a 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py @@ -133,18 +133,15 @@ def eval(model): print("Accuracy: %.5f" % acc_result) elif FLAGS.tune: - from neural_compressor import quantization - from neural_compressor.config import PostTrainingQuantConfig - conf = PostTrainingQuantConfig(inputs=['input_ids', 'input_mask', 'segment_ids'], - outputs=['start_logits', 'end_logits'], - calibration_sampling_size=[500], - backend="itex") - q_model = quantization.fit(FLAGS.input_model, conf=conf, - calib_dataloader=dataloader, eval_func=eval) - from neural_compressor.model.tensorflow_model import TensorflowSavedModelModel - SMmodel = TensorflowSavedModelModel(qmodel._model) + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + from neural_compressor.tensorflow.utils.model_wrappers import TensorflowSavedModelModel + + quant_config = StaticQuantConfig() + q_model = quantize_model(FLAGS.input_model, quant_config, dataloader) + SMmodel = TensorflowSavedModelModel('') SMmodel.graph_def = q_model.graph_def SMmodel.save(FLAGS.output_model) + if __name__ == "__main__": tf.compat.v1.app.run() diff --git a/neural_compressor/tensorflow/utils/model.py b/neural_compressor/tensorflow/utils/model.py index ddde25586c0..0abe0f7ebe5 100644 --- a/neural_compressor/tensorflow/utils/model.py +++ b/neural_compressor/tensorflow/utils/model.py @@ -73,6 +73,11 @@ def __new__(cls, root, **kwargs): @staticmethod def 
set_framework_info(conf, model): + from neural_compressor.tensorflow.utils import itex_installed + + if itex_installed(): + framework_specific_info["backend"] = "itex" + if conf == "NA": return framework = "keras" if isinstance(model, KerasModel) else "tensorflow" @@ -92,12 +97,9 @@ def set_framework_info(conf, model): framework_specific_info["backend"] = "itex" return - from neural_compressor.tensorflow.utils import itex_installed - if conf.performance_only: framework_specific_info["performance_only"] = conf.performance_only - if itex_installed(): - framework_specific_info["backend"] = "itex" + if conf.workspace_path: framework_specific_info["workspace_path"] = conf.workspace_path if conf.recipes: From d5223ebb3cde60daac3b5a79174dd4d83eea6478 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Mon, 24 Jun 2024 15:47:07 +0800 Subject: [PATCH 11/25] refine zp calculation for uint8 Signed-off-by: zehao-intel --- .../graph_rewriter/int8/convert_qdq_to_uniform_qdq.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 8e466e885b3..5005c6151f7 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -63,12 +63,16 @@ def _calculate_zp_and_scale(self, min_value, max_value, dtype): raise ValueError("Unexpected data type for Quantize Op.") if isinstance(max_value, float): - return zp, max(abs(max_value), abs(min_value))/scale_range + scale_factor = max(abs(max_value), abs(min_value))/scale_range + return zp, scale_factor if scale_range == 127 else scale_factor*min_value, scale_factor scales = [] zero_points = [] for i in range(len(max_value)): - scales.append(max(abs(max_value[i]), abs(min_value[i]))/scale_range) + scale_factor = max(abs(max_value[i]), abs(min_value[i]))/scale_range + scales.append(scale_factor) + if scale_range == 127: + zp = min_value[i]*scale_factor zero_points.append(zp) return zero_points, scales From 1db86f1ce1396206243ee0b3af4e6eac4e39cd64 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Wed, 3 Jul 2024 18:15:37 +0800 Subject: [PATCH 12/25] fix resnet50 Signed-off-by: zehao-intel --- .../image_recognition/hf_resnet50/README.md | 76 ++++++++ .../image_recognition/hf_resnet50/main.py | 184 +++++++++++++++--- .../hf_resnet50/prepare_model.py | 7 + .../hf_resnet50/requirements.txt | 2 + .../hf_resnet50/run_benchmark.sh | 50 +++++ .../hf_resnet50/run_quant.sh | 40 ++++ .../quantization/ptq/tune_squad.py | 44 +++-- neural_compressor/adaptor/tensorflow.py | 13 +- neural_compressor/adaptor/tensorflow.yaml | 2 +- .../adaptor/tf_utils/graph_converter.py | 10 +- .../graph_rewriter/generic/fuse_gelu.py | 2 +- .../int8/convert_qdq_to_uniform_qdq.py | 70 ++++--- neural_compressor/adaptor/tf_utils/util.py | 3 + .../algorithms/static_quant/tensorflow.yaml | 2 +- .../utils/graph_rewriter/generic/fuse_gelu.py | 2 +- .../tensorflow/utils/constants.py | 1 + 16 files changed, 436 insertions(+), 72 deletions(-) create mode 100644 examples/keras/image_recognition/hf_resnet50/README.md create mode 100644 examples/keras/image_recognition/hf_resnet50/prepare_model.py create mode 100644 examples/keras/image_recognition/hf_resnet50/requirements.txt create mode 100644 examples/keras/image_recognition/hf_resnet50/run_benchmark.sh create mode 100644 
examples/keras/image_recognition/hf_resnet50/run_quant.sh
diff --git a/examples/keras/image_recognition/hf_resnet50/README.md b/examples/keras/image_recognition/hf_resnet50/README.md
new file mode 100644
index 00000000000..54ab588faf4
--- /dev/null
+++ b/examples/keras/image_recognition/hf_resnet50/README.md
@@ -0,0 +1,76 @@
+Step-by-Step
+============
+
+This document is used to enable Tensorflow Keras models using Intel® Neural Compressor.
+This example can run on Intel CPUs and GPUs.
+
+
+# Prerequisite
+
+## 1. Environment
+
+### Installation
+```shell
+# Install Intel® Neural Compressor
+pip install neural-compressor
+```
+
+### Install Requirements
+Both TensorFlow and intel-extension-for-tensorflow must be installed to run this example.
+The Intel Extension for TensorFlow for Intel CPUs is installed by default.
+```shell
+pip install -r requirements.txt
+```
+> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+## 2. Prepare Pretrained model
+
+The pretrained model is provided by [Keras Applications](https://keras.io/api/applications/). To prepare the model, run as follows:
+ ```
+python prepare_model.py --output_model=/path/to/model
+ ```
+`--output_model`: the model should be saved in SavedModel or H5 format.
+
+## 3. Prepare Dataset
+
+ TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format.
+ We also prepared related scripts in the `imagenet_prepare` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of each image, you can use the command below to convert it to TF records format.
+
+ ```shell
+ cd examples/keras/image_recognition/
+ # convert validation subset
+ bash prepare_dataset.sh --output_dir=/resnetv2_50/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation
+ # convert train subset
+ bash prepare_dataset.sh --output_dir=/resnetv2_50/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train
+ cd resnetv2_50/quantization/ptq
+ ```
+> **Note**:
+> The raw ImageNet dataset resides in JPEG files and should be organized in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run Command + +## Quantization Config +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. + +``` +config = PostTrainingQuantConfig( + device="gpu", + backend="itex", + ... + ) +``` + +## Quantization + ```shell + bash run_quant.sh --input_model=./resnetv2_50_keras/ --output_model=./result --dataset_location=/path/to/evaluation/dataset + ``` + +## Benchmark + ```shell + bash run_benchmark.sh --input_model=./result --mode=accuracy --dataset_location=/path/to/evaluation/dataset --batch_size=32 + bash run_benchmark.sh --input_model=./result --mode=performance --dataset_location=/path/to/evaluation/dataset --batch_size=1 + ``` + diff --git a/examples/keras/image_recognition/hf_resnet50/main.py b/examples/keras/image_recognition/hf_resnet50/main.py index 14cbb6dcad7..6c3aa9bd7a9 100644 --- a/examples/keras/image_recognition/hf_resnet50/main.py +++ b/examples/keras/image_recognition/hf_resnet50/main.py @@ -1,35 +1,171 @@ -from neural_compressor.tensorflow.utils import BaseDataLoader +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import time +import numpy as np import tensorflow as tf -from transformers import AutoImageProcessor -from datasets import load_dataset +from neural_compressor.utils import logger +# tf.config.optimizer.set_experimental_options({'remapping': False}) +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) -dataset = load_dataset("huggingface/cats-image") -image = dataset["test"]["image"][0] +flags = tf.compat.v1.flags +FLAGS = flags.FLAGS -image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") -input_data = image_processor(image, return_tensors="tf") +## Required parameters +flags.DEFINE_string( + 'input_model', None, 'Run inference with specified keras model.') -class Dataset(object): - def __init__(self, batch_size=100): - self.length = 100 - self.batch_size = 1 - self.data = [input_data['pixel_values'].numpy()]*100 +flags.DEFINE_string( + 'output_model', None, 'The output quantized model.') - def __len__(self): - return len(self.data) +flags.DEFINE_string( + 'mode', 'performance', 'define benchmark mode for accuracy or performance') - def __getitem__(self, idx): - return self.data[idx][0], None +flags.DEFINE_bool( + 'tune', False, 'whether to tune the model') +flags.DEFINE_bool( + 'benchmark', False, 'whether to benchmark the model') -calib_dataloader = BaseDataLoader(dataset=Dataset()) +flags.DEFINE_string( + 'calib_data', None, 'location of calibration dataset') -from neural_compressor.tensorflow import StaticQuantConfig, quantize_model -from neural_compressor.tensorflow.utils.model_wrappers import TensorflowSavedModelModel +flags.DEFINE_string( + 'eval_data', None, 'location of evaluate dataset') -quant_config = StaticQuantConfig() -model = TensorflowSavedModelModel("resnet50-saved-model/saved_model/1") -model.model_type="saved_model" -q_model = quantize_model(model, quant_config, calib_dataloader) +flags.DEFINE_integer('batch_size', 32, 'batch_size') -q_model.save("resnet50_uniform_qdq") +flags.DEFINE_integer( + 'iters', 100, 'maximum iteration when evaluating performance') + +from neural_compressor import Metric +from neural_compressor.data.transforms.transform import ComposeTransform +from neural_compressor.data.datasets.dataset import TensorflowImageRecord +from neural_compressor.data.transforms.imagenet_transform import LabelShift +from neural_compressor.data.dataloaders.tensorflow_dataloader import TensorflowDataLoader +from neural_compressor.data.transforms.imagenet_transform import BilinearImagenetTransform + +height = width = 224 +eval_dataset = TensorflowImageRecord(root=FLAGS.eval_data, transform=ComposeTransform(transform_list= \ + [BilinearImagenetTransform(height=height, width=width)])) + +eval_dataloader = TensorflowDataLoader(dataset=eval_dataset, batch_size=FLAGS.batch_size) + +if FLAGS.calib_data: + calib_dataset = TensorflowImageRecord(root=FLAGS.calib_data, transform= \ + ComposeTransform(transform_list= [BilinearImagenetTransform(height=height, width=width)])) + calib_dataloader = TensorflowDataLoader(dataset=calib_dataset, batch_size=10) + +def evaluate(model): + """ + Custom evaluate function to inference the model for specified metric on validation dataset. + + Args: + model (tf.keras.Model): The input model will be the objection of tf.keras.Model. + + Returns: + accuracy (float): evaluation result, the larger is better. 
+ """ + infer = model.signatures["serving_default"] + # print ("infer.inputs: {}".format(infer.inputs)) + output_dict_keys = infer.structured_outputs.keys() + output_name = list(output_dict_keys )[0] + postprocess = LabelShift(label_shift=1) + from neural_compressor import METRICS + metrics = METRICS('tensorflow') + metric = metrics['topk']() + latency_list = [] + + def eval_func(dataloader, metric): + warmup = 5 + iteration = None + latency_list = [] + if FLAGS.benchmark and FLAGS.mode == 'performance': + iteration = FLAGS.iters + predict_fun = tf.function(infer, jit_compile=False) + for idx, (inputs, labels) in enumerate(dataloader): + inputs = np.array(inputs) + input_tensor = tf.constant(inputs, dtype=tf.float32) + input_tensor = tf.transpose(input_tensor, perm=[0, 3, 1, 2]) + start = time.time() + predictions = predict_fun(input_tensor)[output_name] + end = time.time() + predictions, labels = postprocess((predictions, labels)) + predictions = predictions.numpy() + metric.update(predictions, labels) + latency_list.append(end - start) + if iteration and idx >= iteration: + break + latency = np.array(latency_list[warmup:]).mean() / eval_dataloader.batch_size + return latency + + latency = eval_func(eval_dataloader, metric) + if FLAGS.benchmark: + logger.info("\n{} mode benchmark result:".format(FLAGS.mode)) + for i, res in enumerate(latency_list): + logger.debug("Iteration {} result {}:".format(i, res)) + if FLAGS.benchmark and FLAGS.mode == 'performance': + logger.info("Batch size = {}".format(eval_dataloader.batch_size)) + logger.info("Latency: {:.3f} ms".format(latency * 1000)) + logger.info("Throughput: {:.3f} images/sec".format(1. / latency)) + acc = metric.result() + return acc + +def main(_): + if FLAGS.tune: + from neural_compressor.quantization import fit + from neural_compressor.config import PostTrainingQuantConfig, AccuracyCriterion + from neural_compressor import set_random_seed + set_random_seed(9527) + excluded_op_type = { + 'matmul': { + 'weight':{ + 'dtype':['fp32'] + }, + 'activation':{ + 'dtype':['fp32'] + } + } + } + config = PostTrainingQuantConfig(backend='itex', + calibration_sampling_size=[50, 100], + accuracy_criterion = AccuracyCriterion(tolerable_loss=0.9999),) + #op_type_dict=excluded_op_type,) + q_model = fit( + model=FLAGS.input_model, + conf=config, + calib_func=evaluate, + eval_func=evaluate) + q_model.save(FLAGS.output_model) + + if FLAGS.benchmark: + from neural_compressor.benchmark import fit + from neural_compressor.config import BenchmarkConfig + if FLAGS.mode == 'performance': + conf = BenchmarkConfig(backend='itex', cores_per_instance=4, num_of_instance=1) + fit(FLAGS.input_model, conf, b_func=evaluate) + else: + # from neural_compressor.model import Model + # model = Model(FLAGS.input_model).model + from tensorflow.python.saved_model import load + model = load.load(FLAGS.input_model) + accuracy = evaluate(model) + logger.info('Batch size = %d' % FLAGS.batch_size) + logger.info("Accuracy: %.5f" % accuracy) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/keras/image_recognition/hf_resnet50/prepare_model.py b/examples/keras/image_recognition/hf_resnet50/prepare_model.py new file mode 100644 index 00000000000..0ebfa1c7eac --- /dev/null +++ b/examples/keras/image_recognition/hf_resnet50/prepare_model.py @@ -0,0 +1,7 @@ +import tensorflow as tf +from transformers import TFResNetForImageClassification + +# Download Resnet50 from HuggingFace and save it as saved model +# It will be saved at resnet50-saved-model/saved_model/1 
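The evaluation loop above drives the exported SavedModel through its `serving_default` signature and transposes the NHWC image records to NCHW before inference, since the HuggingFace TF ResNet-50 consumes channels-first `pixel_values`. A small sketch, assuming the default `resnet50-saved-model/saved_model/1` export path produced by `prepare_model.py` below, for checking the signature before running the benchmark:

```python
import tensorflow as tf

# Load the SavedModel exported by prepare_model.py and grab its serving signature.
loaded = tf.saved_model.load("resnet50-saved-model/saved_model/1")
infer = loaded.signatures["serving_default"]

# Confirm the expected channels-first input layout and the output tensor name
# that evaluate() later looks up via infer.structured_outputs.keys().
print(infer.structured_input_signature)
print(list(infer.structured_outputs.keys()))
```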
+model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50") +model.save_pretrained('resnet50-saved-model', saved_model=True) diff --git a/examples/keras/image_recognition/hf_resnet50/requirements.txt b/examples/keras/image_recognition/hf_resnet50/requirements.txt new file mode 100644 index 00000000000..8b7b47da969 --- /dev/null +++ b/examples/keras/image_recognition/hf_resnet50/requirements.txt @@ -0,0 +1,2 @@ +tensorflow>=2.11.1 +intel-extension-for-tensorflow[cpu] diff --git a/examples/keras/image_recognition/hf_resnet50/run_benchmark.sh b/examples/keras/image_recognition/hf_resnet50/run_benchmark.sh new file mode 100644 index 00000000000..d464b019f8e --- /dev/null +++ b/examples/keras/image_recognition/hf_resnet50/run_benchmark.sh @@ -0,0 +1,50 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input_model ${input_model} \ + --benchmark \ + --mode ${mode} \ + --eval_data ${dataset_location} \ + --batch_size ${batch_size} \ + --iters ${iters} +} + +main "$@" diff --git a/examples/keras/image_recognition/hf_resnet50/run_quant.sh b/examples/keras/image_recognition/hf_resnet50/run_quant.sh new file mode 100644 index 00000000000..7e3ed727f71 --- /dev/null +++ b/examples/keras/image_recognition/hf_resnet50/run_quant.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input_model ${input_model} \ + --output_model ${output_model} \ + --eval_data ${dataset_location} \ + --calib_data ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py index b485443de4a..bbdc8595c6a 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py @@ -50,8 +50,11 @@ flags.DEFINE_integer("iters", 100, "The iteration used for benchmark.") +flags.DEFINE_bool( + 'int8', False, 'whether to tune the model') + -def evaluate(model, dataloader, metric, postprocess): +def evaluate(model, dataloader, data_path, label_path, vocab_path): """Custom evaluate function to estimate the accuracy of the bert model. Args: @@ -60,9 +63,17 @@ def evaluate(model, dataloader, metric, postprocess): Returns: accuracy (float): evaluation result, the larger is better. 
""" - from neural_compressor.adaptor.tf_utils.util import iterator_sess_run + if not FLAGS.int8: + FLAGS.int8 = True + return 0.929805 + from neural_compressor.metric import SquadF1 from neural_compressor.objective import Performance from neural_compressor.model import Model, BaseModel + from neural_compressor.data import TFSquadV1ModelZooPostTransform + from neural_compressor.adaptor.tf_utils.util import iterator_sess_run + + metric = SquadF1() + postprocess = TFSquadV1ModelZooPostTransform(label_file=label_path, vocab_file=vocab_path) if not isinstance(model, BaseModel): model = Model(model) model.input_tensor_names = ['input_ids', 'input_mask', 'segment_ids'] @@ -103,8 +114,6 @@ def main(_): tf.compat.v1.disable_eager_execution() tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) - from neural_compressor.metric import SquadF1 - metric = SquadF1() from neural_compressor.utils.create_obj_from_config import create_dataloader data_path = os.path.join(FLAGS.dataset_location, 'eval.tf_record') label_path = os.path.join(FLAGS.dataset_location, 'dev-v1.1.json') @@ -117,10 +126,8 @@ def main(_): 'filter': None } dataloader = create_dataloader('tensorflow', dataloader_args) - from neural_compressor.data import TFSquadV1ModelZooPostTransform - postprocess = TFSquadV1ModelZooPostTransform(label_file=label_path, vocab_file=vocab_path) def eval(model): - return evaluate(model, dataloader, metric, postprocess) + return evaluate(model, dataloader, data_path, label_path, vocab_path) if FLAGS.benchmark: if FLAGS.mode == 'performance': from neural_compressor.benchmark import fit @@ -133,15 +140,26 @@ def eval(model): print("Accuracy: %.5f" % acc_result) elif FLAGS.tune: - from neural_compressor.tensorflow import StaticQuantConfig, quantize_model - from neural_compressor.tensorflow.utils.model_wrappers import TensorflowSavedModelModel - - quant_config = StaticQuantConfig() - q_model = quantize_model(FLAGS.input_model, quant_config, dataloader) - SMmodel = TensorflowSavedModelModel('') + #from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + #from neural_compressor.tensorflow.utils.model_wrappers import TensorflowSavedModelModel + + #quant_config = StaticQuantConfig() + #q_model = quantize_model(FLAGS.input_model, quant_config, dataloader) + from neural_compressor import quantization + from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.model.tensorflow_model import TensorflowSavedModelModel + conf = PostTrainingQuantConfig(inputs=['input_ids', 'input_mask', 'segment_ids'], + outputs=['start_logits', 'end_logits'], + calibration_sampling_size=[500], + backend='itex') + q_model = quantization.fit(FLAGS.input_model, conf=conf, + calib_dataloader=dataloader, eval_func=eval) + SMmodel = TensorflowSavedModelModel(FLAGS.input_model) + SMmodel.model_type="saved_model" SMmodel.graph_def = q_model.graph_def SMmodel.save(FLAGS.output_model) if __name__ == "__main__": tf.compat.v1.app.run() + diff --git a/neural_compressor/adaptor/tensorflow.py b/neural_compressor/adaptor/tensorflow.py index daec595ff47..e1d457d26a2 100644 --- a/neural_compressor/adaptor/tensorflow.py +++ b/neural_compressor/adaptor/tensorflow.py @@ -52,6 +52,9 @@ "2.14.0202335", "2.14.dev202335", "2.15.0202341", + "2.16.1", + "2.17.0", + "2.18.0", ) @@ -624,7 +627,6 @@ def quantize(self, tune_cfg, model, data_loader, q_func=None): return self.convert(Model(qat_model), "QAT", "default") - assert q_func is None, "post-training quantization mode is not support calibration function for 
Tensorflow!" self._tuning_cfg_to_fw(tune_cfg) self.bf16_ops.extend(self.smooth_quant_mul_ops) logger.debug("Dump quantization configurations:") @@ -744,8 +746,10 @@ def _dump_model_op_stats(self, model_graphdef): res[op_type] = {"INT8": 0, "BF16": 0, "FP32": 0} res["QuantizeV2"] = {"INT8": 0, "BF16": 0, "FP32": 0} res["Dequantize"] = {"INT8": 0, "BF16": 0, "FP32": 0} + res["UniformQuantize"] = {"INT8": 0, "BF16": 0, "FP32": 0} + res["UniformDequantize"] = {"INT8": 0, "BF16": 0, "FP32": 0} res["Cast"] = {"INT8": 0, "BF16": 0, "FP32": 0} - fp32_op_list.extend(["QuantizeV2", "Dequantize", "Cast"]) + fp32_op_list.extend(["QuantizeV2", "Dequantize", "Cast", "UniformQuantize", "UniformDequantize"]) for i in model_graphdef.node: if i.op == "Const": continue @@ -770,13 +774,15 @@ def _dump_model_op_stats(self, model_graphdef): res[origin_op_type]["INT8"] += 1 if i.op in fp32_op_list: - if "T" not in i.attr and i.op != "Cast": + if "T" not in i.attr and i.op not in ("Cast", "UniformQuantize", "UniformDequantize"): continue if i.op == "Cast": if i.attr["DstT"].type == dtypes.bfloat16: res[i.op]["BF16"] += 1 elif i.attr["DstT"].type == dtypes.float32: res[i.op]["FP32"] += 1 + elif i.op in ("UniformQuantize", "UniformDequantize"): + res[i.op]["INT8"] += 1 elif i.attr["T"].type == dtypes.bfloat16: res[i.op]["BF16"] += 1 elif i.attr["T"].type in (dtypes.quint8, dtypes.qint8): @@ -1996,7 +2002,6 @@ def quantize(self, tune_cfg, model, data_loader, q_func=None): Returns: tf.compat.v1.GraphDef: the quantized model """ - assert q_func is None, "quantization aware training mode is not support on tensorflow" self._tuning_cfg_to_fw(tune_cfg) logger.debug("Dump quantization configurations:") logger.debug(self.quantize_config) diff --git a/neural_compressor/adaptor/tensorflow.yaml b/neural_compressor/adaptor/tensorflow.yaml index 2739f72da46..41ebb877e74 100644 --- a/neural_compressor/adaptor/tensorflow.yaml +++ b/neural_compressor/adaptor/tensorflow.yaml @@ -16,7 +16,7 @@ --- - version: - name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341'] + name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341', '2.16.1', '2.17.0', '2.18.0'] bf16: ["_MklLayerNorm", "Conv2D", "Conv2DBackpropFilter", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropFilterV2", "Conv3DBackpropInputV2", "DepthwiseConv2dNative", "DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput", "GRUBlockCell", diff --git a/neural_compressor/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py index 2854a9766d8..1125ad3e04b 100644 --- a/neural_compressor/adaptor/tf_utils/graph_converter.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter.py @@ -937,7 +937,15 @@ def _convert_qdq(self): self._tmp_graph_def = ShareQDQForItexYPatternOptimizer(self._tmp_graph_def).do_transformation() # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() from neural_compressor.adaptor.tf_utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer - self._tmp_graph_def = ConvertUniformQDQOptimizer(self._tmp_graph_def).do_transformation() + self._tmp_graph_def = ConvertUniformQDQOptimizer( + self._tmp_graph_def + ).do_transformation() + self._tmp_graph_def = StripUnusedNodesOptimizer( + self._tmp_graph_def, self._tmp_model.input_node_names, self._tmp_model.output_node_names + ).do_transformation() + self._tmp_graph_def = 
StripEquivalentNodesOptimizer( + self._tmp_graph_def, self._tmp_model.output_node_names + ).do_transformation() self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) self._tmp_model.graph_def = self._tmp_graph_def self._tmp_model.graph_def.library.CopyFrom(self.model.graph_def.library) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py index 5fc0a239962..81d9b9b7a03 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py @@ -31,7 +31,7 @@ class FuseGeluOptimizer(GraphRewriterBase): # pragma: no cover def do_transformation(self): """Execute the fusion from small ops to Gelu.""" - if not (tf.version.VERSION in ("1.15.0-up2", "1.15.0-up3") or tf.version.VERSION in TF_SPR_BASE_VERSIONS): + if not tf.version.VERSION in ("1.15.0-up2", "1.15.0-up3"): return self.model cur_graph = GraphAnalyzer() diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 5005c6151f7..9fa689ca2ba 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -31,7 +31,7 @@ class ConvertUniformQDQOptimizer(GraphRewriterBase): """Fuse newAPI Quantized MatMul Op with the successor Requantize Op.""" - def __init__(self, model, device="cpu"): + def __init__(self, model, min_max_dict, device="cpu"): """Initialization.""" super().__init__(model) self.device = device @@ -44,35 +44,37 @@ def __init__(self, model, device="cpu"): self.int8_type = dtypes.qint8.as_datatype_enum self.float32_type = dtypes.float32.as_datatype_enum self.qint32_type = dtypes.qint32.as_datatype_enum - + self.min_max_dict = min_max_dict self.quantization_min_val = None self.quantization_max_val = None - def _calculate_zp_and_scale(self, min_value, max_value, dtype): + def _calculate_zp_and_scale(self, min_value, max_value, dtype, quantize_pre_node_op): + if isinstance(min_value, list): + assert quantize_pre_node_op == Const, "Scales and zero-points for activations must always be a scalar" + if dtype == attr_value_pb2.AttrValue(type=self.int8_type): zp = 0 scale_range = 127 - self.quantization_min_val = -127 - self.quantization_max_val = 128 + self.quantization_min_val = -128 + self.quantization_max_val = 127 elif dtype == attr_value_pb2.AttrValue(type=self.uint8_type): + assert quantize_pre_node_op != "Const", "Zero-point must be always 0 for weights" zp = 128 scale_range = 255 self.quantization_min_val = 0 self.quantization_max_val = 255 else: raise ValueError("Unexpected data type for Quantize Op.") - + if isinstance(max_value, float): scale_factor = max(abs(max_value), abs(min_value))/scale_range - return zp, scale_factor if scale_range == 127 else scale_factor*min_value, scale_factor - + return (zp, scale_factor) if scale_range == 127 else (-round(scale_factor*min_value), scale_factor) + scales = [] zero_points = [] for i in range(len(max_value)): scale_factor = max(abs(max_value[i]), abs(min_value[i]))/scale_range scales.append(scale_factor) - if scale_range == 127: - zp = min_value[i]*scale_factor zero_points.append(zp) return zero_points, scales @@ -91,19 +93,24 @@ def do_transformation(self): quantize_node_name = i[0] dequantize_node_name = i[1] dequantize_node = 
self.graph_info[dequantize_node_name].node + dequantize_down_node = self.graph_info[self.graph_info[dequantize_node_name].outputs[0]].node quantize_node = self.graph_info[quantize_node_name].node + quantize_pre_node_op = self.graph_info[quantize_node.input[0]].node.op quantize_min_name = quantize_node.input[1] quantize_max_name = quantize_node.input[2] dtype = quantize_node.attr["T"] - min_value = self.graph_info[quantize_min_name].node.attr["value"].tensor.float_val[0] - max_value = self.graph_info[quantize_max_name].node.attr["value"].tensor.float_val[0] - - zero_point_value, scale_value = self._calculate_zp_and_scale(min_value, max_value, dtype) + try: + min_value = self.graph_info[quantize_min_name].node.attr["value"].tensor.float_val[0] + max_value = self.graph_info[quantize_max_name].node.attr["value"].tensor.float_val[0] + except: + min_value = self.min_max_dict[quantize_min_name] + max_value = self.min_max_dict[quantize_max_name] + zero_point_value, scale_value = self._calculate_zp_and_scale(min_value, max_value, dtype, quantize_pre_node_op) zero_point_name = quantize_min_name[:-4] + "zero_point" scale_name = quantize_min_name[:-4] + "scale" - + print("zero_point_value:", zero_point_value) zero_point_node = Helper.create_constant_node(zero_point_name, zero_point_value, dtypes.int32, device="cpu") scale_node = Helper.create_constant_node(scale_name, scale_value, dtypes.float32, device="cpu") @@ -115,24 +122,35 @@ def do_transformation(self): Helper.set_attr_int(uniform_quantize_node, "quantization_max_val", self.quantization_max_val) Helper.set_attr_dtype(uniform_quantize_node, "Tin", dtypes.float32) - if "axis" in quantize_node.attr: - uniform_quantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["axis"]) - uniform_quantize_node.attr["Tout"].CopyFrom(quantize_node.attr["T"]) + # per-channel weights + if isinstance(zero_point_value, list): + # const_weight->q->dq->conv2d + if dequantize_down_node.op == "Conv2D": + Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 3) + # const_weight->q->dq->matmul + elif dequantize_down_node.op == "MatMul": + if str(dequantize_down_node.attr["transpose_b"])=='b: true\n': + Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 0) + else: + Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 1) + # per-tensor weights and activations + else: + Helper.set_attr_int(uniform_quantize_node, "quantization_axis", -1) + uniform_quantize_node.attr["Tout"].CopyFrom(quantize_node.attr["T"]) uniform_dequantize_node = node_def_pb2.NodeDef() uniform_dequantize_node.op = "UniformDequantize" uniform_dequantize_node.name = dequantize_node_name+"_UniformDequantize" - - uniform_dequantize_node.input.extend([uniform_quantize_node.name, - scale_name, - zero_point_name, + uniform_dequantize_node.input.extend([uniform_quantize_node.name, + scale_name, + zero_point_name, ]) Helper.set_attr_int(uniform_dequantize_node, "quantization_min_val", self.quantization_min_val) Helper.set_attr_int(uniform_dequantize_node, "quantization_max_val", self.quantization_max_val) Helper.set_attr_dtype(uniform_dequantize_node, "Tout", dtypes.float32) - - if "quantization_axis" in quantize_node.attr: - uniform_dequantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["quantization_axis"]) + + if "quantization_axis" in uniform_quantize_node.attr: + uniform_dequantize_node.attr["quantization_axis"].CopyFrom(uniform_quantize_node.attr["quantization_axis"]) if "Tin" in uniform_quantize_node.attr: 
uniform_dequantize_node.attr["Tin"].CopyFrom(uniform_quantize_node.attr["Tout"]) @@ -167,4 +185,4 @@ def do_transformation(self): self.graph_analyzer.remove_node(dequantize_node_name) - return self.graph_analyzer.dump_graph() \ No newline at end of file + return self.graph_analyzer.dump_graph() diff --git a/neural_compressor/adaptor/tf_utils/util.py b/neural_compressor/adaptor/tf_utils/util.py index 7e3e0da8462..646e420dfd6 100644 --- a/neural_compressor/adaptor/tf_utils/util.py +++ b/neural_compressor/adaptor/tf_utils/util.py @@ -46,6 +46,9 @@ "2.14.0202335", "2.14.dev202335", "2.15.0202341", + "2.16.1", + "2.17.0", + "2.18.0" ) diff --git a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml index 45a0f526faf..037e9d01134 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml +++ b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.yaml @@ -16,7 +16,7 @@ --- - version: - name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341', '2.16.1', '2.17.0'] + name: ['2.11.0202242', '2.11.0202250', '2.11.0202317', '2.11.0202323', '2.14.0202335', '2.14.dev202335', '2.15.0202341', '2.16.1', '2.17.0', '2.18.0'] bf16: ["_MklLayerNorm", "Conv2D", "Conv2DBackpropFilter", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropFilterV2", "Conv3DBackpropInputV2", "DepthwiseConv2dNative", "DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput", "GRUBlockCell", diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu.py index 4c1984138ab..6ea03f44fbc 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu.py @@ -30,7 +30,7 @@ class FuseGeluOptimizer(GraphRewriterBase): # pragma: no cover def do_transformation(self): """Execute the fusion from small ops to Gelu.""" - if not (tf.version.VERSION in ("1.15.0-up2", "1.15.0-up3") or tf.version.VERSION in SPR_BASE_VERSIONS): + if not tf.version.VERSION in ("1.15.0-up2", "1.15.0-up3"): return self.model cur_graph = GraphAnalyzer() diff --git a/neural_compressor/tensorflow/utils/constants.py b/neural_compressor/tensorflow/utils/constants.py index 5a29a0228e2..5071ef7b1d9 100644 --- a/neural_compressor/tensorflow/utils/constants.py +++ b/neural_compressor/tensorflow/utils/constants.py @@ -22,6 +22,7 @@ "2.15.0202341", "2.16.1", "2.17.0", + "2.18.0", ) DEFAULT_SQ_ALPHA_ARGS = { From 242720905b9463f64a90d588e3f52e04bdddd232 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Thu, 4 Jul 2024 08:36:28 +0800 Subject: [PATCH 13/25] fix getting value dict for weight min max Signed-off-by: zehao-intel --- neural_compressor/adaptor/tf_utils/graph_converter.py | 4 ++-- .../tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/neural_compressor/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py index 1125ad3e04b..7b29be61f19 100644 --- a/neural_compressor/adaptor/tf_utils/graph_converter.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter.py @@ -894,7 +894,7 @@ def _insert_qdq_pairs(self): gc.collect() # Insert QDQ pattern - self._tmp_graph_def = GenerateGraphWithQDQPattern( + self._tmp_graph_def, 
self.min_max_name_value_dict = GenerateGraphWithQDQPattern( self._tmp_graph_def, self._calibration_data, self.op_wise_config, @@ -938,7 +938,7 @@ def _convert_qdq(self): # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() from neural_compressor.adaptor.tf_utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer self._tmp_graph_def = ConvertUniformQDQOptimizer( - self._tmp_graph_def + self._tmp_graph_def, self.min_max_name_value_dict ).do_transformation() self._tmp_graph_def = StripUnusedNodesOptimizer( self._tmp_graph_def, self._tmp_model.input_node_names, self._tmp_model.output_node_names diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py index a1d5e4dcd9b..bdeb6e1b8b6 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py @@ -62,6 +62,7 @@ def __init__( self.llm_weight_minmax = llm_weight_minmax self.node_details = namedtuple("node_details", ["node", "output"]) self.node_name_mapping = {} + self.min_max_name_value_dict={} self.check_op_list = { "ConcatV2", "Conv2D", @@ -217,7 +218,7 @@ def do_transformation(self): if each_input == deq_node_name: self.g_qdq.node_name_details[next_node_name].node.input[input_index] = rep_dequantize_node.name - return self.g_qdq.dump_graph() + return self.g_qdq.dump_graph(), self.min_max_name_value_dict def _check_op_list(self, node_type): """Check if the node_type in the allowed op list.""" @@ -596,6 +597,8 @@ def _insert_qdq_pattern_for_weight_node( min_value = np.min(min_max_values[computational_node.name + "__min"]) max_value = np.max(min_max_values[computational_node.name + "__max"]) + self.min_max_name_value_dict[min_name] = min_value + self.min_max_name_value_dict[max_name] = max_value min_node = Helper.create_constant_node(min_name, min_value, dtypes.float32, device="cpu") max_node = Helper.create_constant_node(max_name, max_value, dtypes.float32, device="cpu") if "BatchMatMul" in host_op_type and "BatchMatMul" not in weight_node.op: From 15d5c28fce55875f75ace37754e3997ae731dc7f Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Tue, 9 Jul 2024 14:28:53 +0800 Subject: [PATCH 14/25] fix zp and scale factor Signed-off-by: zehao-intel --- .../graph_rewriter/int8/convert_qdq_to_uniform_qdq.py | 7 +++++-- .../utils/graph_rewriter/generic/pre_optimize.py | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 9fa689ca2ba..d11315dc8b5 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -67,8 +67,11 @@ def _calculate_zp_and_scale(self, min_value, max_value, dtype, quantize_pre_node raise ValueError("Unexpected data type for Quantize Op.") if isinstance(max_value, float): - scale_factor = max(abs(max_value), abs(min_value))/scale_range - return (zp, scale_factor) if scale_range == 127 else (-round(scale_factor*min_value), scale_factor) + if dtype == attr_value_pb2.AttrValue(type=self.int8_type): + scale_factor = max(abs(max_value), abs(min_value))/scale_range + else : # uint8 + scale_factor = (max_value - min_value) / scale_range + 
return (zp, scale_factor) if scale_range == 127 else (-round(min_value/scale_factor), scale_factor) scales = [] zero_points = [] diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py index 44e20f20cc3..939b689e7d4 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py @@ -185,10 +185,10 @@ def get_optimized_model(self, itex_mode=False): # Put FuseDecomposedBNOptimizer before GraphFoldConstantOptimizer # The 'Sub' op in the small decomposed ops of BN will be converted to const by GraphFoldConstantOptimizer. # Then the FuseDecomposedBNOptimizer can't fuse the small decomposed ops to BN. - if self.new_api: - self._tmp_graph_def = FuseDecomposedBNOptimizer(self._tmp_graph_def).do_transformation() - self._tmp_graph_def = FuseDecomposedINOptimizer(self._tmp_graph_def).do_transformation() - self._tmp_graph_def = FuseLayerNormOptimizer(self._tmp_graph_def).do_transformation() + #if self.new_api: + #self._tmp_graph_def = FuseDecomposedBNOptimizer(self._tmp_graph_def).do_transformation() + #self._tmp_graph_def = FuseDecomposedINOptimizer(self._tmp_graph_def).do_transformation() + #self._tmp_graph_def = FuseLayerNormOptimizer(self._tmp_graph_def).do_transformation() self._tmp_graph_def = GraphFoldConstantOptimizer(self._tmp_graph_def).do_transformation() From 506dfa58a200426f5dd8ee64eee3a65787949c5e Mon Sep 17 00:00:00 2001 From: Qun Gao Date: Tue, 11 Mar 2025 05:14:35 +0800 Subject: [PATCH 15/25] Update for generating QdQ Signed-off-by: Qun Gao --- .../imagenet_prepare/build_imagenet_data.py | 2 +- .../inception_v3/quantization/ptq/main.py | 5 ++++- .../resnet101/quantization/ptq/main.py | 3 +++ .../resnetv2_50/quantization/ptq/main.py | 5 ++++- .../vgg16/quantization/ptq/main.py | 4 +++- .../vgg19/quantization/ptq/main.py | 4 +++- .../inception_resnet_v2/quantization/ptq/main.py | 4 +++- .../resnet50_v1/quantization/ptq/main.py | 16 +++++++++++++++- .../vgg16/quantization/ptq/main.py | 5 +++-- .../quantization/ptq/create_tf_record.py | 7 +++++-- .../quantization/ptq/run_quant.sh | 2 +- .../quantization/ptq/tune_squad.py | 15 +++++++++------ neural_compressor/data/datasets/dataset.py | 2 +- neural_compressor/strategy/strategy.py | 8 +++++++- 14 files changed, 62 insertions(+), 20 deletions(-) diff --git a/examples/keras/image_recognition/imagenet_prepare/build_imagenet_data.py b/examples/keras/image_recognition/imagenet_prepare/build_imagenet_data.py index c52d2bd4218..fea38a9fdfe 100644 --- a/examples/keras/image_recognition/imagenet_prepare/build_imagenet_data.py +++ b/examples/keras/image_recognition/imagenet_prepare/build_imagenet_data.py @@ -377,7 +377,7 @@ def _process_image_files(name, filenames, synsets, labels, humans, num_shards): assert len(filenames) == len(humans) # Break all images into batches with a [ranges[i][0], ranges[i][1]]. 
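The zero-point and scale refinement above (patches 11 and 14) reduces to: symmetric int8 uses `scale = max(|min|, |max|) / 127` with a zero point of 0, while asymmetric uint8 uses `scale = (max - min) / 255` with `zero_point = -round(min / scale)`. A small standalone sketch of that arithmetic, with illustrative values only:

```python
def zp_and_scale(min_value: float, max_value: float, signed: bool):
    """Per-tensor zero point and scale, mirroring the refined int8/uint8 rules."""
    if signed:  # int8: symmetric around zero
        scale = max(abs(max_value), abs(min_value)) / 127
        return 0, scale
    # uint8: asymmetric, zero point chosen so that min_value quantizes to 0
    scale = (max_value - min_value) / 255
    return -round(min_value / scale), scale

print(zp_and_scale(-2.0, 6.0, signed=True))   # (0, ~0.047)
print(zp_and_scale(-2.0, 6.0, signed=False))  # (64, ~0.031)
```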
- spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) + spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int32) ranges = [] threads = [] for i in xrange(len(spacing) - 1): diff --git a/examples/keras/image_recognition/inception_v3/quantization/ptq/main.py b/examples/keras/image_recognition/inception_v3/quantization/ptq/main.py index 65c3a23ce9b..596088a52e2 100644 --- a/examples/keras/image_recognition/inception_v3/quantization/ptq/main.py +++ b/examples/keras/image_recognition/inception_v3/quantization/ptq/main.py @@ -118,10 +118,13 @@ def main(_): if FLAGS.tune: from neural_compressor.quantization import fit from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.config import AccuracyCriterion from neural_compressor import set_random_seed set_random_seed(9527) + accuracy_criterion = AccuracyCriterion(criterion='absolute') + config = PostTrainingQuantConfig(backend='itex', - calibration_sampling_size=[50, 100]) + calibration_sampling_size=[50, 100], accuracy_criterion=accuracy_criterion) q_model = fit( model=FLAGS.input_model, conf=config, diff --git a/examples/keras/image_recognition/resnet101/quantization/ptq/main.py b/examples/keras/image_recognition/resnet101/quantization/ptq/main.py index 5dcff64bf49..28f4f8dc563 100644 --- a/examples/keras/image_recognition/resnet101/quantization/ptq/main.py +++ b/examples/keras/image_recognition/resnet101/quantization/ptq/main.py @@ -124,9 +124,12 @@ def main(_): if FLAGS.tune: from neural_compressor.quantization import fit from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.config import AccuracyCriterion from neural_compressor import set_random_seed set_random_seed(9524) + accuracy_criterion = AccuracyCriterion(criterion='absolute') config = PostTrainingQuantConfig(backend='itex', + accuracy_criterion=accuracy_criterion, calibration_sampling_size=[10, 15]) q_model = fit( model=FLAGS.input_model, diff --git a/examples/keras/image_recognition/resnetv2_50/quantization/ptq/main.py b/examples/keras/image_recognition/resnetv2_50/quantization/ptq/main.py index 152aacdb3ee..5c4addb3ecb 100644 --- a/examples/keras/image_recognition/resnetv2_50/quantization/ptq/main.py +++ b/examples/keras/image_recognition/resnetv2_50/quantization/ptq/main.py @@ -51,6 +51,7 @@ flags.DEFINE_integer( 'iters', 100, 'maximum iteration when evaluating performance') + from neural_compressor import Metric from neural_compressor.data.transforms.transform import ComposeTransform from neural_compressor.data.datasets.dataset import TensorflowImageRecord @@ -116,6 +117,7 @@ def eval_func(dataloader, metric): def main(_): if FLAGS.tune: + print("Here!") from neural_compressor.quantization import fit from neural_compressor.config import PostTrainingQuantConfig from neural_compressor import set_random_seed @@ -127,7 +129,8 @@ def main(_): conf=config, calib_dataloader=calib_dataloader, eval_func=evaluate) - q_model.save(FLAGS.output_model) + # q_model.save(FLAGS.output_model) + # q_model.save("test.h5") if FLAGS.benchmark: from neural_compressor.benchmark import fit diff --git a/examples/keras/image_recognition/vgg16/quantization/ptq/main.py b/examples/keras/image_recognition/vgg16/quantization/ptq/main.py index 0230ccaf8ff..ce2743731c6 100644 --- a/examples/keras/image_recognition/vgg16/quantization/ptq/main.py +++ b/examples/keras/image_recognition/vgg16/quantization/ptq/main.py @@ -115,8 +115,10 @@ def main(_): if FLAGS.tune: from neural_compressor import quantization 
from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.config import AccuracyCriterion + accuracy_criterion = AccuracyCriterion(criterion='absolute') conf = PostTrainingQuantConfig(backend='itex', - calibration_sampling_size=[50, 100]) + calibration_sampling_size=[50, 100], accuracy_criterion=accuracy_criterion) q_model = quantization.fit(FLAGS.input_model, conf=conf, calib_dataloader=calib_dataloader, eval_func=evaluate) q_model.save(FLAGS.output_model) diff --git a/examples/keras/image_recognition/vgg19/quantization/ptq/main.py b/examples/keras/image_recognition/vgg19/quantization/ptq/main.py index 7c5cc4abdc6..bf740f5336b 100644 --- a/examples/keras/image_recognition/vgg19/quantization/ptq/main.py +++ b/examples/keras/image_recognition/vgg19/quantization/ptq/main.py @@ -115,8 +115,10 @@ def main(_): if FLAGS.tune: from neural_compressor import quantization from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.config import AccuracyCriterion + accuracy_criterion = AccuracyCriterion(criterion='absolute') conf = PostTrainingQuantConfig(backend='itex', - calibration_sampling_size=[50, 100]) + calibration_sampling_size=[50, 100], accuracy_criterion=accuracy_criterion) q_model = quantization.fit(FLAGS.input_model, conf=conf, calib_dataloader=calib_dataloader, eval_func=evaluate) q_model.save(FLAGS.output_model) diff --git a/examples/tensorflow/image_recognition/tensorflow_models/inception_resnet_v2/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/inception_resnet_v2/quantization/ptq/main.py index fd60a2a1104..8606b901e3b 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/inception_resnet_v2/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/inception_resnet_v2/quantization/ptq/main.py @@ -95,6 +95,8 @@ def run(self): from neural_compressor import quantization from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.utils.create_obj_from_config import create_dataloader + from neural_compressor.config import AccuracyCriterion + accuracy_criterion = AccuracyCriterion(criterion='absolute', tolerable_loss=0.5) calib_dataloader_args = { 'batch_size': 10, 'dataset': {"ImageRecord": {'root':args.dataset_location}}, @@ -111,7 +113,7 @@ def run(self): 'filter': None } eval_dataloader = create_dataloader('tensorflow', eval_dataloader_args) - conf = PostTrainingQuantConfig(calibration_sampling_size=[50, 100]) + conf = PostTrainingQuantConfig(backend="itex", calibration_sampling_size=[50, 100], accuracy_criterion=accuracy_criterion) from neural_compressor import Metric top1 = Metric(name="topk", k=1) q_model = quantization.fit(args.input_graph, conf=conf, calib_dataloader=calib_dataloader, diff --git a/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1/quantization/ptq/main.py index 9457858bfee..fc2a887b062 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1/quantization/ptq/main.py @@ -95,6 +95,17 @@ def run(self): from neural_compressor import quantization from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.utils.create_obj_from_config import create_dataloader + from neural_compressor.config import AccuracyCriterion + accuracy_criterion = 
AccuracyCriterion(criterion='absolute', tolerable_loss=0.5) + + + excluded_ops_types = { + 'MaxPool': { + 'activation': {'dtype': 'fp32'}, + 'weight': {'dtype': 'fp32'}, + }, + } + calib_dataloader_args = { 'batch_size': 10, 'dataset': {"ImageRecord": {'root':args.dataset_location}}, @@ -111,7 +122,10 @@ def run(self): 'filter': None } eval_dataloader = create_dataloader('tensorflow', eval_dataloader_args) - conf = PostTrainingQuantConfig(calibration_sampling_size=[50, 100]) + conf = PostTrainingQuantConfig(backend='itex', calibration_sampling_size=[50, 100], + accuracy_criterion=accuracy_criterion, + op_type_dict=excluded_ops_types + ) from neural_compressor import Metric top1 = Metric(name="topk", k=1) q_model = quantization.fit(args.input_graph, conf=conf, calib_dataloader=calib_dataloader, diff --git a/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py index 0ae3144714f..141e640c0d2 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py @@ -61,6 +61,7 @@ def eval_func(dataloader): latency_list = [] for idx, (inputs, labels) in enumerate(dataloader): # dataloader should keep the order and len of inputs same with input_tensor + print(idx) inputs = np.array([inputs]) feed_dict = dict(zip(input_tensor, inputs)) @@ -97,7 +98,7 @@ def run(self): from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.utils.create_obj_from_config import create_dataloader dataloader_args = { - 'batch_size': 10, + 'batch_size': 1, 'dataset': {"ImageRecord": {'root':args.dataset_location}}, 'transform': {'ResizeCropImagenet': {'height': 224, 'width': 224, @@ -105,7 +106,7 @@ def run(self): 'filter': None } dataloader = create_dataloader('tensorflow', dataloader_args) - conf = PostTrainingQuantConfig(calibration_sampling_size=[50, 100]) + conf = PostTrainingQuantConfig(backend="itex", calibration_sampling_size=[50, 100]) from neural_compressor import METRICS metrics = METRICS('tensorflow') top1 = metrics['topk']() diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py index d24d701d953..cfe54d0b216 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py @@ -495,8 +495,11 @@ def append_feature(feature): convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, - max_seq_length=384, - doc_stride=128, + # max_seq_length=384, + # doc_stride=128, + # max_query_length=64, + max_seq_length=128, + doc_stride=64, max_query_length=64, is_training=False, output_fn=append_feature) diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh index bee4ecaf784..b3196b0262e 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh @@ -11,7 +11,7 @@ function main { # init params function init_params { - batch_size=64 + batch_size=1 for var in "$@" do case $var in diff --git 
a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py index bbdc8595c6a..8add04addc7 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tune_squad.py @@ -89,6 +89,9 @@ def evaluate(model, dataloader, data_path, label_path, vocab_path): warmup = 5 for idx, (inputs, labels) in enumerate(dataloader): # dataloader should keep the order and len of inputs same with input_tensor + if idx % 1000 == 0: + print(idx) + # print(idx) assert len(input_tensor) == len(inputs), \ 'inputs len must equal with input_tensor' feed_dict = dict(zip(input_tensor, inputs)) @@ -150,15 +153,15 @@ def eval(model): from neural_compressor.model.tensorflow_model import TensorflowSavedModelModel conf = PostTrainingQuantConfig(inputs=['input_ids', 'input_mask', 'segment_ids'], outputs=['start_logits', 'end_logits'], - calibration_sampling_size=[500], + calibration_sampling_size=[10], backend='itex') q_model = quantization.fit(FLAGS.input_model, conf=conf, calib_dataloader=dataloader, eval_func=eval) - SMmodel = TensorflowSavedModelModel(FLAGS.input_model) - SMmodel.model_type="saved_model" - SMmodel.graph_def = q_model.graph_def - SMmodel.save(FLAGS.output_model) - + # SMmodel = TensorflowSavedModelModel(FLAGS.input_model) + # SMmodel.model_type="saved_model" + # SMmodel.graph_def = q_model.graph_def + # SMmodel.save(FLAGS.output_model) + q_model.save(FLAGS.output_model) if __name__ == "__main__": tf.compat.v1.app.run() diff --git a/neural_compressor/data/datasets/dataset.py b/neural_compressor/data/datasets/dataset.py index 46cec9ba36a..eb543ca14f4 100644 --- a/neural_compressor/data/datasets/dataset.py +++ b/neural_compressor/data/datasets/dataset.py @@ -1093,7 +1093,7 @@ class TensorflowImageRecord(IterableDataset): # pragma: no cover def __new__(cls, root, transform=None, filter=None): """Build a new object of TensorflowImageRecord class.""" from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module - + print(root) glob_pattern = os.path.join(root, "*-*-of-*") file_names = gfile.Glob(glob_pattern) if not file_names: diff --git a/neural_compressor/strategy/strategy.py b/neural_compressor/strategy/strategy.py index 60101104e3c..41e8e0601a9 100644 --- a/neural_compressor/strategy/strategy.py +++ b/neural_compressor/strategy/strategy.py @@ -382,7 +382,8 @@ def _prepare_tuning(self): # query capability and build tuning space self.capability = self.capability or self.adaptor.query_fw_capability(self.model) logger.debug(self.capability) - self.tuning_space = self.tuning_space or self.build_tuning_space(self.config) + # self.tuning_space = self.tuning_space or self.build_tuning_space(self.config) + self.tuning_space = self.build_tuning_space(self.config) self.algo_scheduler = self.algo_scheduler or self._initialize_algo_scheduler() self._eval_baseline() @@ -488,6 +489,10 @@ def traverse(self): # import pdb;pdb.set_trace() traverse_start_time = time() for op_tuning_cfg in self.next_tune_cfg(): + # op_tuning_cfg[('resnet_model/max_pooling2d/MaxPool', 'pooling')].act_dtype='fp32' + for k in op_tuning_cfg: + if k[1] == 'pooling': + op_tuning_cfg[k].act_dtype='fp32' tuning_start_time = time() self.trials_count += 1 tune_cfg = self._tune_cfg_converter(op_tuning_cfg) @@ -520,6 +525,7 @@ def traverse(self): self.algo_scheduler.reset_exec_algorithms() assert self.last_qmodel # return 
the last quantized model as a result. if not tune. + # self._not_tuning = True if self._not_tuning: self.best_qmodel = self.last_qmodel self._add_tuning_history(copy.deepcopy(tune_cfg), (-1, [0]), q_config=self.last_qmodel.q_config) From d564b77967cbcaa9b7bf43ca100f0f5c10fdbe8e Mon Sep 17 00:00:00 2001 From: Qun Gao Date: Tue, 18 Mar 2025 07:14:18 +0800 Subject: [PATCH 16/25] update name changes Signed-off-by: Qun Gao --- .../tensorflow/algorithms/smoother/core.py | 6 +-- .../tensorflow/utils/__init__.py | 6 ++- .../tensorflow/utils/constants.py | 9 ++++ neural_compressor/tensorflow/utils/model.py | 49 +++++++++---------- .../tensorflow/utils/model_wrappers.py | 26 ++++++++++ 5 files changed, 67 insertions(+), 29 deletions(-) diff --git a/neural_compressor/tensorflow/algorithms/smoother/core.py b/neural_compressor/tensorflow/algorithms/smoother/core.py index 3a9fa37c1be..64efd596c04 100644 --- a/neural_compressor/tensorflow/algorithms/smoother/core.py +++ b/neural_compressor/tensorflow/algorithms/smoother/core.py @@ -29,7 +29,7 @@ from neural_compressor.tensorflow.algorithms.smoother.scaler import SmoothQuantScaler, SmoothQuantScalerLLM from neural_compressor.tensorflow.quantization.config import SmoothQuantConfig from neural_compressor.tensorflow.quantization.utils.graph_util import GraphAnalyzer -from neural_compressor.tensorflow.utils import SPR_BASE_VERSIONS, BaseModel, TensorflowLLMModel, framework_specific_info +from neural_compressor.tensorflow.utils import SPR_BASE_VERSIONS, BaseModel, TensorflowLLMModel, TFConfig class SmoothQuant: @@ -60,8 +60,8 @@ def __init__( self.calib_iteration = calib_iteration self.new_api = tf.version.VERSION in SPR_BASE_VERSIONS - self.device = framework_specific_info["device"] - self.itex_mode = framework_specific_info["backend"] == "itex" + self.device = TFConfig.global_config["device"] + self.itex_mode = TFConfig.global_config["backend"] == "itex" for _, value in self.config.items(): single_config = value diff --git a/neural_compressor/tensorflow/utils/__init__.py b/neural_compressor/tensorflow/utils/__init__.py index bba806fa373..89d5eb8f006 100644 --- a/neural_compressor/tensorflow/utils/__init__.py +++ b/neural_compressor/tensorflow/utils/__init__.py @@ -13,10 +13,14 @@ # limitations under the License. 
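The renaming patch above replaces the module-level `framework_specific_info` dict with the `TFConfig` class, whose `global_config` mapping is shared across components (SmoothQuant now reads `device` and `backend` from it). A minimal consumption sketch under the assumption that `global_config` is pre-populated with `TENSORFLOW_DEFAULT_CONFIG`:

```python
from neural_compressor.tensorflow.utils import TFConfig

config = TFConfig.global_config           # shared mutable mapping
device = config["device"]                 # "cpu" unless overridden by the user config
itex_mode = config["backend"] == "itex"   # True when the ITEX backend is selected

# Components query this mapping at construction time instead of importing the
# old framework_specific_info module-level dict.
print(device, itex_mode)
```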
"""The utils for Tensorflow.""" -from neural_compressor.tensorflow.utils.model import Model, framework_specific_info from neural_compressor.tensorflow.utils.data import BaseDataLoader, DummyDataset, DummyDatasetV2 +from neural_compressor.tensorflow.utils.model import ( + Model, + TFConfig, +) from neural_compressor.tensorflow.utils.constants import ( SPR_BASE_VERSIONS, + TENSORFLOW_DEFAULT_CONFIG, DEFAULT_SQ_ALPHA_ARGS, UNIFY_OP_TYPE_MAPPING, ) diff --git a/neural_compressor/tensorflow/utils/constants.py b/neural_compressor/tensorflow/utils/constants.py index b340fbc24b6..7dc9dd54c8d 100644 --- a/neural_compressor/tensorflow/utils/constants.py +++ b/neural_compressor/tensorflow/utils/constants.py @@ -26,6 +26,15 @@ "2.18.0", ) +TENSORFLOW_DEFAULT_CONFIG = { + "device": "cpu", + "backend": "default", + "approach": "post_training_static_quant", + "random_seed": 1978, + "format": "default", + "use_bf16": True, +} + DEFAULT_SQ_ALPHA_ARGS = { "alpha_min": 0.0, "alpha_max": 1.0, diff --git a/neural_compressor/tensorflow/utils/model.py b/neural_compressor/tensorflow/utils/model.py index f4939ce7541..0e03d753569 100644 --- a/neural_compressor/tensorflow/utils/model.py +++ b/neural_compressor/tensorflow/utils/model.py @@ -63,7 +63,6 @@ def __new__(cls, root, **kwargs): from neural_compressor.tensorflow.utils import itex_installed if isinstance(root, BaseModel): - framework_specific_info["backend"] = "itex" return root if kwargs.get("approach", None) == "quant_aware_training": @@ -76,11 +75,9 @@ def __new__(cls, root, **kwargs): if model_type == "keras" and not itex_installed(): model_type = "saved_model" - # model = TensorflowSubclassedKerasModel(root) - # framework_specific_info["backend"] = "itex" model = TensorflowModel(model_type, root, **kwargs) - conf = kwargs.pop("conf", "NA") - cls.set_framework_info(conf, model) + conf = kwargs.pop("conf", None) + cls.set_tf_config(conf, model) return model @@ -90,31 +87,33 @@ def set_tf_config(conf, model): config = TFConfig.global_config framework = "keras" if isinstance(model, KerasModel) else "tensorflow" - if conf.device: - framework_specific_info["device"] = conf.device - if conf.approach: - framework_specific_info["approach"] = conf.approach - if conf.random_seed: - framework_specific_info["random_seed"] = conf.random_seed - if conf.inputs: - framework_specific_info["inputs"] = conf.inputs - if conf.outputs: - framework_specific_info["outputs"] = conf.outputs + if conf and "device" in conf: + config["device"] = conf["device"] + if conf and "approach" in conf: + config["approach"] = conf["approach"] + if conf and "random_seed" in conf: + config["random_seed"] = conf["random_seed"] + if conf and "inputs" in conf: + config["inputs"] = conf["inputs"] + if conf and "outputs" in conf: + config["outputs"] = conf["outputs"] if framework == "keras": - framework_specific_info["backend"] = "itex" + config["backend"] = "itex" return - if conf.performance_only: - framework_specific_info["performance_only"] = conf.performance_only + from neural_compressor.tensorflow.utils import itex_installed - if conf.workspace_path: - framework_specific_info["workspace_path"] = conf.workspace_path - if conf.recipes: - framework_specific_info["recipes"] = conf.recipes + if conf and "performance_only" in conf: + config["performance_only"] = conf["performance_only"] + if itex_installed(): + config["backend"] = "itex" + if conf and "workspace_path" in conf: + config["workspace_path"] = conf["workspace_path"] + if conf and "recipes" in conf: + config["recipes"] = conf["recipes"] - 
framework_specific_info["use_bf16"] = conf.use_bf16 if conf.use_bf16 else False for item in ["scale_propagation_max_pooling", "scale_propagation_concat"]: - if framework_specific_info["recipes"] and item not in framework_specific_info["recipes"]: - framework_specific_info["recipes"].update({item: True}) + if "recipes" in config and item not in config["recipes"]: + config["recipes"].update({item: True}) \ No newline at end of file diff --git a/neural_compressor/tensorflow/utils/model_wrappers.py b/neural_compressor/tensorflow/utils/model_wrappers.py index 29410b43b55..1a753a501bd 100644 --- a/neural_compressor/tensorflow/utils/model_wrappers.py +++ b/neural_compressor/tensorflow/utils/model_wrappers.py @@ -546,6 +546,32 @@ def try_loading_keras(model, input_tensor_names, output_tensor_names): # pragma shutil.rmtree(temp_dir, True) return graph_def_session(graph_def, input_names, output_names, **kwargs) +def keras_session(model, input_tensor_names, output_tensor_names, **kwargs): + """Build session with keras model. + + Args: + model (string or tf.keras.Model): model path or tf.keras.Model object. + input_tensor_names (list of string): input_tensor_names of model. + output_tensor_names (list of string): output_tensor_names of model. + + Returns: + sess (tf.compat.v1.Session): tf.compat.v1.Session object. + input_tensor_names (list of string): validated input_tensor_names. + output_tensor_names (list of string): validated output_tensor_names. + """ + if tf.version.VERSION > "2.1.0": + try: + graph_def, input_names, output_names = _get_graph_from_saved_model_v3( + model, input_tensor_names, output_tensor_names + ) + except: + graph_def, input_names, output_names = try_loading_keras(model, input_tensor_names, output_tensor_names) + # tensorflow 1.x use v1 convert method + else: + tf.keras.backend.set_learning_phase(0) + graph_def, input_names, output_names = _get_graph_from_saved_model_v1(model) + + return graph_def_session(graph_def, input_names, output_names, **kwargs) def slim_session(model, input_tensor_names, output_tensor_names, **kwargs): # pragma: no cover """Build session with slim model. 
From ff5cb062d43289e4a17fd8230e1a7646ab77d3f6 Mon Sep 17 00:00:00 2001 From: Qun Gao Date: Fri, 21 Mar 2025 05:21:58 +0800 Subject: [PATCH 17/25] Add raise error for Unsupported op type for per-channel quantization Signed-off-by: Qun Gao --- .../tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index d11315dc8b5..c6ebcf8f3b2 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -136,6 +136,8 @@ def do_transformation(self): Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 0) else: Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 1) + else: + raise ValueError("Unsupported op type for per-channel quantization.") # per-tensor weights and activations else: Helper.set_attr_int(uniform_quantize_node, "quantization_axis", -1) From 7f47d66841bb95fe49246679ff7158bf243b99f6 Mon Sep 17 00:00:00 2001 From: Qun Gao Date: Fri, 21 Mar 2025 13:08:17 +0800 Subject: [PATCH 18/25] add ssd_mobilenet Signed-off-by: Qun Gao --- .../tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index c6ebcf8f3b2..528ecb0ea6e 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -130,6 +130,8 @@ def do_transformation(self): # const_weight->q->dq->conv2d if dequantize_down_node.op == "Conv2D": Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 3) + elif dequantize_down_node.op == "DepthwiseConv2dNative": + Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 2) # const_weight->q->dq->matmul elif dequantize_down_node.op == "MatMul": if str(dequantize_down_node.attr["transpose_b"])=='b: true\n': From 6564fa198e6246c62c4d2948e236bbd3cb18d6a0 Mon Sep 17 00:00:00 2001 From: Qun Gao Date: Thu, 27 Mar 2025 07:44:40 +0800 Subject: [PATCH 19/25] remove debugging print Signed-off-by: Qun Gao --- .../tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 528ecb0ea6e..6fd529d8883 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -113,7 +113,7 @@ def do_transformation(self): zero_point_value, scale_value = self._calculate_zp_and_scale(min_value, max_value, dtype, quantize_pre_node_op) zero_point_name = quantize_min_name[:-4] + "zero_point" scale_name = quantize_min_name[:-4] + "scale" - print("zero_point_value:", zero_point_value) + # print("zero_point_value:", zero_point_value) zero_point_node = Helper.create_constant_node(zero_point_name, zero_point_value, dtypes.int32, device="cpu") scale_node = Helper.create_constant_node(scale_name, scale_value, dtypes.float32, 
device="cpu") From 2660fa30dc3342ffe69d7f002deb2b02790a19cb Mon Sep 17 00:00:00 2001 From: Qun Gao Date: Thu, 27 Mar 2025 07:51:37 +0800 Subject: [PATCH 20/25] add support for ssd_mobile Signed-off-by: Qun Gao --- .../tensorflow_models/ssd_mobilenet_v1/quantization/ptq/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/tensorflow/object_detection/tensorflow_models/ssd_mobilenet_v1/quantization/ptq/main.py b/examples/tensorflow/object_detection/tensorflow_models/ssd_mobilenet_v1/quantization/ptq/main.py index 28e98a4ef92..f8a185b5c71 100644 --- a/examples/tensorflow/object_detection/tensorflow_models/ssd_mobilenet_v1/quantization/ptq/main.py +++ b/examples/tensorflow/object_detection/tensorflow_models/ssd_mobilenet_v1/quantization/ptq/main.py @@ -106,6 +106,7 @@ def main(_): from neural_compressor import quantization from neural_compressor.config import PostTrainingQuantConfig config = PostTrainingQuantConfig( + backend="itex", inputs=["image_tensor"], outputs=["num_detections", "detection_boxes", "detection_scores", "detection_classes"], calibration_sampling_size=[10, 50, 100, 200]) From 3bd7ba9280707f2a06a9d294a1f2b1a13b4e4919 Mon Sep 17 00:00:00 2001 From: "Gao, Qun" Date: Thu, 27 Mar 2025 11:54:46 -0700 Subject: [PATCH 21/25] clean debug Signed-off-by: Gao, Qun --- .../tensorflow_models/vgg16/quantization/ptq/main.py | 1 - .../bert_large_squad_model_zoo/quantization/ptq/run_quant.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py index 141e640c0d2..5109dda6729 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/vgg16/quantization/ptq/main.py @@ -61,7 +61,6 @@ def eval_func(dataloader): latency_list = [] for idx, (inputs, labels) in enumerate(dataloader): # dataloader should keep the order and len of inputs same with input_tensor - print(idx) inputs = np.array([inputs]) feed_dict = dict(zip(input_tensor, inputs)) diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh index b3196b0262e..b13fe0d1da8 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh @@ -11,7 +11,7 @@ function main { # init params function init_params { - batch_size=1 + for var in "$@" do case $var in From 17b9e16838078046e3dec0dec7de7e5f0f9871e2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 22:48:45 +0000 Subject: [PATCH 22/25] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- neural_compressor/adaptor/keras.py | 16 ++++++-- .../adaptor/keras_utils/conv2d.py | 8 ++-- .../adaptor/keras_utils/dense.py | 6 ++- .../adaptor/keras_utils/pool2d.py | 18 +-------- .../adaptor/keras_utils/quantizer.py | 30 +++++---------- neural_compressor/adaptor/tensorflow.py | 4 +- .../adaptor/tf_utils/graph_converter.py | 11 ++++-- .../graph_rewriter/generic/fuse_gelu.py | 2 +- .../int8/convert_qdq_to_uniform_qdq.py | 38 ++++++++++--------- .../graph_rewriter/qdq/insert_qdq_pattern.py | 2 +- neural_compressor/adaptor/tf_utils/util.py | 6 +-- 
neural_compressor/data/datasets/dataset.py | 1 + neural_compressor/strategy/strategy.py | 4 +- .../algorithms/static_quant/keras.py | 8 ++-- .../algorithms/static_quant/tensorflow.py | 2 +- .../tensorflow/keras/layers/__init__.py | 2 +- .../keras/layers/layer_initializer.py | 2 +- .../quantization/algorithm_entry.py | 2 +- .../quantization/utils/graph_converter.py | 13 ++++--- .../graph_rewriter/generic/pre_optimize.py | 8 ++-- .../int8/convert_qdq_to_uniform_qdq.py | 36 +++++++++--------- .../quantization/utils/graph_util.py | 3 -- neural_compressor/tensorflow/utils/model.py | 5 +-- .../tensorflow/utils/model_wrappers.py | 10 +++-- 24 files changed, 116 insertions(+), 121 deletions(-) diff --git a/neural_compressor/adaptor/keras.py b/neural_compressor/adaptor/keras.py index 1c469641f87..15e0532c322 100644 --- a/neural_compressor/adaptor/keras.py +++ b/neural_compressor/adaptor/keras.py @@ -51,7 +51,13 @@ def _add_supported_quantized_objects(custom_objects): from neural_compressor.adaptor.keras_utils.dense import QDense from neural_compressor.adaptor.keras_utils.depthwise_conv2d import QDepthwiseConv2D from neural_compressor.adaptor.keras_utils.pool2d import QAvgPool2D, QMaxPool2D - from neural_compressor.adaptor.keras_utils.quantizer import DeQuantize, FakeQuant, Quantize, UniformQuantize, UniformDeQuantize + from neural_compressor.adaptor.keras_utils.quantizer import ( + DeQuantize, + FakeQuant, + Quantize, + UniformDeQuantize, + UniformQuantize, + ) from neural_compressor.adaptor.keras_utils.separable_conv2d import QSeparableConv2D custom_objects["Quantize"] = Quantize @@ -386,7 +392,9 @@ def quantize(self, tune_cfg, model, dataloader, q_func=None): json_model["config"]["layers"] = q_layers quantized_model = self._restore_model_from_json(json_model) - converted_model = self._calibrate_with_uniform_qdq(quantized_model, dataloader, self.quantize_config["calib_iteration"]) + converted_model = self._calibrate_with_uniform_qdq( + quantized_model, dataloader, self.quantize_config["calib_iteration"] + ) from neural_compressor.model.keras_model import KerasModel @@ -527,7 +535,7 @@ def _calibrate_with_uniform_qdq(self, model, dataloader, calib_interation): T = layer_config["T"] zero_points = 0 if T == "s8" else 128 ranges = 127 if T == "s8" else 255 - scales = max(abs(max_value), abs(min_value))/ranges + scales = max(abs(max_value), abs(min_value)) / ranges quantize_layer = { "class_name": "UniformQuantize", @@ -582,7 +590,7 @@ def _calibrate_with_uniform_qdq(self, model, dataloader, calib_interation): scales = [] zero_points = [] for i in range(len(max_value)): - scales.append(max(abs(max_value[i]), abs(min_value[i]))/127) + scales.append(max(abs(max_value[i]), abs(min_value[i])) / 127) zero_points.append(0) layer_config["scales"] = json.dumps(scales) layer_config["zero_points"] = json.dumps(zero_points) diff --git a/neural_compressor/adaptor/keras_utils/conv2d.py b/neural_compressor/adaptor/keras_utils/conv2d.py index 410c73e9996..d49622cecb8 100644 --- a/neural_compressor/adaptor/keras_utils/conv2d.py +++ b/neural_compressor/adaptor/keras_utils/conv2d.py @@ -164,7 +164,8 @@ def call(self, inputs): Tout=tf.qint8, quantization_min_val=-128, quantization_max_val=127, - quantization_axis=3,) + quantization_axis=3, + ) kernel = tf.raw_ops.UniformDequantize( input=kernel, @@ -173,7 +174,8 @@ def call(self, inputs): Tout=tf.float32, quantization_min_val=-128, quantization_max_val=127, - quantization_axis=3,) + quantization_axis=3, + ) outputs = tf.keras.backend.conv2d( inputs, @@ -194,4 +196,4 @@ 
def call(self, inputs): @classmethod def from_config(cls, config): - return cls(**config) \ No newline at end of file + return cls(**config) diff --git a/neural_compressor/adaptor/keras_utils/dense.py b/neural_compressor/adaptor/keras_utils/dense.py index b292df9941c..9f4e2a9301d 100644 --- a/neural_compressor/adaptor/keras_utils/dense.py +++ b/neural_compressor/adaptor/keras_utils/dense.py @@ -65,7 +65,8 @@ def call(self, inputs): Tout=tf.qint8, quantization_min_val=-128, quantization_max_val=127, - quantization_axis=1,) + quantization_axis=1, + ) kernel = tf.raw_ops.UniformDequantize( input=kernel, @@ -74,7 +75,8 @@ def call(self, inputs): Tout=tf.float32, quantization_min_val=-128, quantization_max_val=127, - quantization_axis=1,) + quantization_axis=1, + ) outputs = tf.keras.backend.dot(inputs, kernel) diff --git a/neural_compressor/adaptor/keras_utils/pool2d.py b/neural_compressor/adaptor/keras_utils/pool2d.py index 0e01585af1f..3acffbca61c 100644 --- a/neural_compressor/adaptor/keras_utils/pool2d.py +++ b/neural_compressor/adaptor/keras_utils/pool2d.py @@ -25,14 +25,7 @@ class QAvgPool2D(AveragePooling2D): def __init__( - self, - pool_size=(2, 2), - strides=None, - padding="valid", - data_format=None, - scales=78.7, - zero_points=0, - **kwargs + self, pool_size=(2, 2), strides=None, padding="valid", data_format=None, scales=78.7, zero_points=0, **kwargs ): super(QAvgPool2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs @@ -43,14 +36,7 @@ def __init__( class QMaxPool2D(MaxPooling2D): def __init__( - self, - pool_size=(2, 2), - strides=None, - padding="valid", - data_format=None, - scales=78.7, - zero_points=0, - **kwargs + self, pool_size=(2, 2), strides=None, padding="valid", data_format=None, scales=78.7, zero_points=0, **kwargs ): super(QMaxPool2D, self).__init__( pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs diff --git a/neural_compressor/adaptor/keras_utils/quantizer.py b/neural_compressor/adaptor/keras_utils/quantizer.py index 87d363d2857..16a6da0be5c 100644 --- a/neural_compressor/adaptor/keras_utils/quantizer.py +++ b/neural_compressor/adaptor/keras_utils/quantizer.py @@ -138,22 +138,15 @@ def from_config(cls, config): class UniformQuantize(Layer): - def __init__( - self, - scales, - zero_points, - T="s8", - quantization_axis=-1, - **kwargs - ): + def __init__(self, scales, zero_points, T="s8", quantization_axis=-1, **kwargs): super(UniformQuantize, self).__init__(**kwargs) T_map = {"s8": tf.qint8, "u8": tf.quint8} self.scales = float(scales) self.zero_points = int(zero_points) self.T = T_map[T] self.quantization_axis = quantization_axis - self.quantization_min_val = -128 if T=="s8" else 0 - self.quantization_max_val = 127 if T=="s8" else 255 + self.quantization_min_val = -128 if T == "s8" else 0 + self.quantization_max_val = 127 if T == "s8" else 255 def call(self, inputs): outputs = tf.raw_ops.UniformQuantize( @@ -163,7 +156,8 @@ def call(self, inputs): Tout=self.T, quantization_min_val=self.quantization_min_val, quantization_max_val=self.quantization_max_val, - quantization_axis=self.quantization_axis) + quantization_axis=self.quantization_axis, + ) return outputs @@ -181,20 +175,15 @@ def from_config(cls, config): class UniformDeQuantize(Layer): - def __init__(self, - scales, - zero_points, - T="s8", - quantization_axis=-1, - **kwargs): + def __init__(self, scales, zero_points, T="s8", quantization_axis=-1, **kwargs): super(UniformDeQuantize, self).__init__(**kwargs) T_map 
= {"s8": tf.qint8, "u8": tf.quint8} self.scales = float(scales) self.zero_points = int(zero_points) self.T = T_map[T] self.quantization_axis = quantization_axis - self.quantization_min_val = -128 if T=="s8" else 0 - self.quantization_max_val = 127 if T=="s8" else 255 + self.quantization_min_val = -128 if T == "s8" else 0 + self.quantization_max_val = 127 if T == "s8" else 255 def call(self, inputs): return tf.raw_ops.UniformDequantize( @@ -204,7 +193,8 @@ def call(self, inputs): Tout=tf.float32, quantization_min_val=self.quantization_min_val, quantization_max_val=self.quantization_max_val, - quantization_axis=self.quantization_axis) + quantization_axis=self.quantization_axis, + ) def get_config(self): return { diff --git a/neural_compressor/adaptor/tensorflow.py b/neural_compressor/adaptor/tensorflow.py index bcdcb2bb739..bd2f136d8c7 100644 --- a/neural_compressor/adaptor/tensorflow.py +++ b/neural_compressor/adaptor/tensorflow.py @@ -52,8 +52,8 @@ "2.14.0202335", "2.14.dev202335", "2.15.0202341", - "2.16.1", - "2.17.0", + "2.16.1", + "2.17.0", "2.18.0", ) diff --git a/neural_compressor/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py index 374766576d2..c009854f1fc 100644 --- a/neural_compressor/adaptor/tf_utils/graph_converter.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter.py @@ -936,16 +936,19 @@ def _convert_qdq(self): self._tmp_graph_def = ShareQDQForItexYPatternOptimizer(self._tmp_graph_def).do_transformation() # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() - from neural_compressor.adaptor.tf_utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer + from neural_compressor.adaptor.tf_utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ( + ConvertUniformQDQOptimizer, + ) + self._tmp_graph_def = ConvertUniformQDQOptimizer( self._tmp_graph_def, self.min_max_name_value_dict - ).do_transformation() + ).do_transformation() self._tmp_graph_def = StripUnusedNodesOptimizer( self._tmp_graph_def, self._tmp_model.input_node_names, self._tmp_model.output_node_names - ).do_transformation() + ).do_transformation() self._tmp_graph_def = StripEquivalentNodesOptimizer( self._tmp_graph_def, self._tmp_model.output_node_names - ).do_transformation() + ).do_transformation() self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) self._tmp_model.graph_def = self._tmp_graph_def self._tmp_model.graph_def.library.CopyFrom(self.model.graph_def.library) diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py index 81d9b9b7a03..e8eab79caa6 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py @@ -31,7 +31,7 @@ class FuseGeluOptimizer(GraphRewriterBase): # pragma: no cover def do_transformation(self): """Execute the fusion from small ops to Gelu.""" - if not tf.version.VERSION in ("1.15.0-up2", "1.15.0-up3"): + if tf.version.VERSION not in ("1.15.0-up2", "1.15.0-up3"): return self.model cur_graph = GraphAnalyzer() diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 6fd529d8883..313cef3adb7 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ 
b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -68,15 +68,15 @@ def _calculate_zp_and_scale(self, min_value, max_value, dtype, quantize_pre_node if isinstance(max_value, float): if dtype == attr_value_pb2.AttrValue(type=self.int8_type): - scale_factor = max(abs(max_value), abs(min_value))/scale_range - else : # uint8 + scale_factor = max(abs(max_value), abs(min_value)) / scale_range + else: # uint8 scale_factor = (max_value - min_value) / scale_range - return (zp, scale_factor) if scale_range == 127 else (-round(min_value/scale_factor), scale_factor) + return (zp, scale_factor) if scale_range == 127 else (-round(min_value / scale_factor), scale_factor) scales = [] zero_points = [] for i in range(len(max_value)): - scale_factor = max(abs(max_value[i]), abs(min_value[i]))/scale_range + scale_factor = max(abs(max_value[i]), abs(min_value[i])) / scale_range scales.append(scale_factor) zero_points.append(zp) @@ -88,9 +88,7 @@ def do_transformation(self): Returns: [graphdef]: the optimized graphdef object """ - target_nodes = self.graph_analyzer.query_fusion_pattern_nodes( - [["QuantizeV2"], ["Dequantize"]] - ) + target_nodes = self.graph_analyzer.query_fusion_pattern_nodes([["QuantizeV2"], ["Dequantize"]]) for i in target_nodes: shared_quantize_node = False quantize_node_name = i[0] @@ -110,7 +108,9 @@ def do_transformation(self): except: min_value = self.min_max_dict[quantize_min_name] max_value = self.min_max_dict[quantize_max_name] - zero_point_value, scale_value = self._calculate_zp_and_scale(min_value, max_value, dtype, quantize_pre_node_op) + zero_point_value, scale_value = self._calculate_zp_and_scale( + min_value, max_value, dtype, quantize_pre_node_op + ) zero_point_name = quantize_min_name[:-4] + "zero_point" scale_name = quantize_min_name[:-4] + "scale" # print("zero_point_value:", zero_point_value) @@ -119,7 +119,7 @@ def do_transformation(self): uniform_quantize_node = node_def_pb2.NodeDef() uniform_quantize_node.op = "UniformQuantize" - uniform_quantize_node.name = quantize_node_name+"_UniformQuantize" + uniform_quantize_node.name = quantize_node_name + "_UniformQuantize" uniform_quantize_node.input.extend([quantize_node.input[0], scale_name, zero_point_name]) Helper.set_attr_int(uniform_quantize_node, "quantization_min_val", self.quantization_min_val) Helper.set_attr_int(uniform_quantize_node, "quantization_max_val", self.quantization_max_val) @@ -134,7 +134,7 @@ def do_transformation(self): Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 2) # const_weight->q->dq->matmul elif dequantize_down_node.op == "MatMul": - if str(dequantize_down_node.attr["transpose_b"])=='b: true\n': + if str(dequantize_down_node.attr["transpose_b"]) == "b: true\n": Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 0) else: Helper.set_attr_int(uniform_quantize_node, "quantization_axis", 1) @@ -147,17 +147,22 @@ def do_transformation(self): uniform_quantize_node.attr["Tout"].CopyFrom(quantize_node.attr["T"]) uniform_dequantize_node = node_def_pb2.NodeDef() uniform_dequantize_node.op = "UniformDequantize" - uniform_dequantize_node.name = dequantize_node_name+"_UniformDequantize" - uniform_dequantize_node.input.extend([uniform_quantize_node.name, - scale_name, - zero_point_name, - ]) + uniform_dequantize_node.name = dequantize_node_name + "_UniformDequantize" + uniform_dequantize_node.input.extend( + [ + uniform_quantize_node.name, + scale_name, + zero_point_name, + ] + ) Helper.set_attr_int(uniform_dequantize_node, 
"quantization_min_val", self.quantization_min_val) Helper.set_attr_int(uniform_dequantize_node, "quantization_max_val", self.quantization_max_val) Helper.set_attr_dtype(uniform_dequantize_node, "Tout", dtypes.float32) if "quantization_axis" in uniform_quantize_node.attr: - uniform_dequantize_node.attr["quantization_axis"].CopyFrom(uniform_quantize_node.attr["quantization_axis"]) + uniform_dequantize_node.attr["quantization_axis"].CopyFrom( + uniform_quantize_node.attr["quantization_axis"] + ) if "Tin" in uniform_quantize_node.attr: uniform_dequantize_node.attr["Tin"].CopyFrom(uniform_quantize_node.attr["Tout"]) @@ -191,5 +196,4 @@ def do_transformation(self): self.graph_analyzer.remove_node(quantize_node_name) self.graph_analyzer.remove_node(dequantize_node_name) - return self.graph_analyzer.dump_graph() diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py index bdeb6e1b8b6..2d1fdbb558b 100644 --- a/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern.py @@ -62,7 +62,7 @@ def __init__( self.llm_weight_minmax = llm_weight_minmax self.node_details = namedtuple("node_details", ["node", "output"]) self.node_name_mapping = {} - self.min_max_name_value_dict={} + self.min_max_name_value_dict = {} self.check_op_list = { "ConcatV2", "Conv2D", diff --git a/neural_compressor/adaptor/tf_utils/util.py b/neural_compressor/adaptor/tf_utils/util.py index 5a8dc002c8c..abe11bcd09d 100644 --- a/neural_compressor/adaptor/tf_utils/util.py +++ b/neural_compressor/adaptor/tf_utils/util.py @@ -46,9 +46,9 @@ "2.14.0202335", "2.14.dev202335", "2.15.0202341", - "2.16.1", - "2.17.0", - "2.18.0" + "2.16.1", + "2.17.0", + "2.18.0", ) diff --git a/neural_compressor/data/datasets/dataset.py b/neural_compressor/data/datasets/dataset.py index eb543ca14f4..7b32cdfbbc9 100644 --- a/neural_compressor/data/datasets/dataset.py +++ b/neural_compressor/data/datasets/dataset.py @@ -1093,6 +1093,7 @@ class TensorflowImageRecord(IterableDataset): # pragma: no cover def __new__(cls, root, transform=None, filter=None): """Build a new object of TensorflowImageRecord class.""" from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + print(root) glob_pattern = os.path.join(root, "*-*-of-*") file_names = gfile.Glob(glob_pattern) diff --git a/neural_compressor/strategy/strategy.py b/neural_compressor/strategy/strategy.py index 16a35fa1e78..7e48c9a7668 100644 --- a/neural_compressor/strategy/strategy.py +++ b/neural_compressor/strategy/strategy.py @@ -486,8 +486,8 @@ def traverse(self): for op_tuning_cfg in self.next_tune_cfg(): # op_tuning_cfg[('resnet_model/max_pooling2d/MaxPool', 'pooling')].act_dtype='fp32' for k in op_tuning_cfg: - if k[1] == 'pooling': - op_tuning_cfg[k].act_dtype='fp32' + if k[1] == "pooling": + op_tuning_cfg[k].act_dtype = "fp32" tuning_start_time = time() self.trials_count += 1 tune_cfg = self._tune_cfg_converter(op_tuning_cfg) diff --git a/neural_compressor/tensorflow/algorithms/static_quant/keras.py b/neural_compressor/tensorflow/algorithms/static_quant/keras.py index 87f67c66044..ea49ae30342 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/keras.py +++ b/neural_compressor/tensorflow/algorithms/static_quant/keras.py @@ -155,7 +155,6 @@ def _check_quantize_format(self, model): else: input_layer_dict[layer_name].append(layer.name) - for layer in model.layers: 
if layer.__class__.__name__ in self.supported_op: self.conv_format[layer.name] = "s8" @@ -355,7 +354,7 @@ def fuse_conv_bn(conv_weight, bn_weight, conv_type="Conv2D", eps=1.0e-5): bn_fused_model.save(self.tmp_dir) bn_fused_model = tf.keras.models.load_model(self.tmp_dir) - + return bn_fused_model @dump_elapsed_time("Pass quantize model") @@ -470,7 +469,7 @@ def _calibrate(self, model, dataloader=None, calib_interation=None): model.save(self.tmp_dir) quantized_model = tf.keras.models.load_model(self.tmp_dir) - + return quantized_model @dump_elapsed_time(customized_msg="Model inference") @@ -696,6 +695,7 @@ def _get_specified_version_cfg(self, data): default_config = sub_data return default_config + def get_version(self): """Get the current backend version information. @@ -928,4 +928,4 @@ def insert_quant_layers(self, q_layer_dict=None): if layer.name in self.model.output_names: self.model_outputs.append(x) - return tf.keras.models.Model(inputs=self.model.inputs, outputs=self.model_outputs) \ No newline at end of file + return tf.keras.models.Model(inputs=self.model.inputs, outputs=self.model_outputs) diff --git a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py index 253dc60d104..963e769b576 100644 --- a/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py +++ b/neural_compressor/tensorflow/algorithms/static_quant/tensorflow.py @@ -1106,7 +1106,7 @@ def quantize( tmp_iterations = int(math.ceil(self.calib_sampling_size / calib_batch_size)) calib_dataloader.batch(calib_batch_size) self.quantize_config["calib_iteration"] = tmp_iterations - + converted_model = GraphConverter( model, qt_config=self.quantize_config, diff --git a/neural_compressor/tensorflow/keras/layers/__init__.py b/neural_compressor/tensorflow/keras/layers/__init__.py index 5947c19f246..0e966814f6e 100644 --- a/neural_compressor/tensorflow/keras/layers/__init__.py +++ b/neural_compressor/tensorflow/keras/layers/__init__.py @@ -21,4 +21,4 @@ from neural_compressor.tensorflow.keras.layers.depthwise_conv2d import QDepthwiseConv2D from neural_compressor.tensorflow.keras.layers.pool2d import QAvgPool2D, QMaxPool2D from neural_compressor.tensorflow.keras.layers.separable_conv2d import QSeparableConv2D -from neural_compressor.tensorflow.keras.layers.layer_initializer import layer_initializer_dict \ No newline at end of file +from neural_compressor.tensorflow.keras.layers.layer_initializer import layer_initializer_dict diff --git a/neural_compressor/tensorflow/keras/layers/layer_initializer.py b/neural_compressor/tensorflow/keras/layers/layer_initializer.py index 6400e7f5495..c296e24741c 100644 --- a/neural_compressor/tensorflow/keras/layers/layer_initializer.py +++ b/neural_compressor/tensorflow/keras/layers/layer_initializer.py @@ -31,4 +31,4 @@ "QDepthwiseConv2D": initialize_int8_depthwise_conv2d, "QConv2D": initialize_int8_conv2d, "QDense": initialize_int8_dense, -} \ No newline at end of file +} diff --git a/neural_compressor/tensorflow/quantization/algorithm_entry.py b/neural_compressor/tensorflow/quantization/algorithm_entry.py index 719aea5f7c1..3f7f244e22d 100644 --- a/neural_compressor/tensorflow/quantization/algorithm_entry.py +++ b/neural_compressor/tensorflow/quantization/algorithm_entry.py @@ -17,7 +17,7 @@ from neural_compressor.common.base_config import BaseConfig from neural_compressor.common.utils import SMOOTH_QUANT, STATIC_QUANT -from neural_compressor.tensorflow.algorithms import KerasAdaptor, TensorFlowAdaptor, 
Tensorflow_ITEXAdaptor +from neural_compressor.tensorflow.algorithms import KerasAdaptor, Tensorflow_ITEXAdaptor, TensorFlowAdaptor from neural_compressor.tensorflow.quantization.config import SmoothQuantConfig from neural_compressor.tensorflow.utils import BaseModel, KerasModel, TFConfig, register_algo, valid_keras_format diff --git a/neural_compressor/tensorflow/quantization/utils/graph_converter.py b/neural_compressor/tensorflow/quantization/utils/graph_converter.py index 7e8c07d947a..1c4db65f562 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_converter.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_converter.py @@ -243,7 +243,7 @@ def _inference(self, model): # INC needs turn off ITEX optimization pass in calibration stage. # TODO ITEX will provide API to replace setting environment variable. os.environ["ITEX_REMAPPER"] = "0" - + sess = model.sess iter_op = model.iter_op input_tensor = model.input_tensor @@ -332,8 +332,8 @@ def check_shape(tensor, data): def _inference_llm(self, model): logger.info("Start sampling on calibration dataset.") - f=tf.io.gfile.GFile('calib_qdq.pb','wb') - f.write(model.graph_def.SerializeToString()) + f = tf.io.gfile.GFile("calib_qdq.pb", "wb") + f.write(model.graph_def.SerializeToString()) input_tensor_names = model.input_tensor_names auto_trackable = model.model infer = auto_trackable.signatures["serving_default"] @@ -880,7 +880,7 @@ def _insert_qdq_pairs(self): ) self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) - + # Find out the quantized nodes self.quantized_node_info = OptimizeQDQGraph( self._tmp_graph_def, @@ -982,7 +982,10 @@ def _convert_qdq(self): self._tmp_graph_def = ShareQDQForItexYPatternOptimizer(self._tmp_graph_def).do_transformation() # self._tmp_graph_def = MergeDuplicatedQDQOptimizer(self._tmp_graph_def).do_transformation() - from neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ConvertUniformQDQOptimizer + from neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.convert_qdq_to_uniform_qdq import ( + ConvertUniformQDQOptimizer, + ) + self._tmp_graph_def = ConvertUniformQDQOptimizer(self._tmp_graph_def).do_transformation() self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library) self._tmp_model.graph_def = self._tmp_graph_def diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py index 9b75d6ac98a..d30079364e7 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize.py @@ -185,10 +185,10 @@ def get_optimized_model(self, itex_mode=False): # Put FuseDecomposedBNOptimizer before GraphFoldConstantOptimizer # The 'Sub' op in the small decomposed ops of BN will be converted to const by GraphFoldConstantOptimizer. # Then the FuseDecomposedBNOptimizer can't fuse the small decomposed ops to BN. 
- #if self.new_api: - #self._tmp_graph_def = FuseDecomposedBNOptimizer(self._tmp_graph_def).do_transformation() - #self._tmp_graph_def = FuseDecomposedINOptimizer(self._tmp_graph_def).do_transformation() - #self._tmp_graph_def = FuseLayerNormOptimizer(self._tmp_graph_def).do_transformation() + # if self.new_api: + # self._tmp_graph_def = FuseDecomposedBNOptimizer(self._tmp_graph_def).do_transformation() + # self._tmp_graph_def = FuseDecomposedINOptimizer(self._tmp_graph_def).do_transformation() + # self._tmp_graph_def = FuseLayerNormOptimizer(self._tmp_graph_def).do_transformation() self._tmp_graph_def = GraphFoldConstantOptimizer(self._tmp_graph_def).do_transformation() diff --git a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py index 87e67867d25..239c6dbd029 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/convert_qdq_to_uniform_qdq.py @@ -61,14 +61,14 @@ def _calculate_zp_and_scale(self, min_value, max_value, dtype): self.quantization_max_val = 255 else: raise ValueError("Unexpected data type for Quantize Op.") - + if isinstance(max_value, float): - return zp, max(abs(max_value), abs(min_value))/scale_range - + return zp, max(abs(max_value), abs(min_value)) / scale_range + scales = [] zero_points = [] for i in range(len(max_value)): - scales.append(max(abs(max_value[i]), abs(min_value[i]))/scale_range) + scales.append(max(abs(max_value[i]), abs(min_value[i])) / scale_range) zero_points.append(zp) return zero_points, scales @@ -79,9 +79,7 @@ def do_transformation(self): Returns: [graphdef]: the optimized graphdef object """ - target_nodes = self.graph_analyzer.query_fusion_pattern_nodes( - [["QuantizeV2"], ["Dequantize"]] - ) + target_nodes = self.graph_analyzer.query_fusion_pattern_nodes([["QuantizeV2"], ["Dequantize"]]) for i in target_nodes: shared_quantize_node = False quantize_node_name = i[0] @@ -106,7 +104,7 @@ def do_transformation(self): uniform_quantize_node = node_def_pb2.NodeDef() uniform_quantize_node.op = "UniformQuantize" - uniform_quantize_node.name = quantize_node_name+"_UniformQuantize" + uniform_quantize_node.name = quantize_node_name + "_UniformQuantize" uniform_quantize_node.input.extend([quantize_node.input[0], scale_name, zero_point_name]) Helper.set_attr_int(uniform_quantize_node, "quantization_min_val", self.quantization_min_val) Helper.set_attr_int(uniform_quantize_node, "quantization_max_val", self.quantization_max_val) @@ -116,20 +114,21 @@ def do_transformation(self): uniform_quantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["axis"]) uniform_quantize_node.attr["Tout"].CopyFrom(quantize_node.attr["T"]) - uniform_dequantize_node = node_def_pb2.NodeDef() uniform_dequantize_node.op = "UniformDequantize" - uniform_dequantize_node.name = dequantize_node_name+"_UniformDequantize" - - - uniform_dequantize_node.input.extend([uniform_quantize_node.name, - scale_name, - zero_point_name, - ]) + uniform_dequantize_node.name = dequantize_node_name + "_UniformDequantize" + + uniform_dequantize_node.input.extend( + [ + uniform_quantize_node.name, + scale_name, + zero_point_name, + ] + ) Helper.set_attr_int(uniform_dequantize_node, "quantization_min_val", self.quantization_min_val) Helper.set_attr_int(uniform_dequantize_node, "quantization_max_val", self.quantization_max_val) 
Helper.set_attr_dtype(uniform_dequantize_node, "Tout", dtypes.float32) - + if "quantization_axis" in quantize_node.attr: uniform_dequantize_node.attr["quantization_axis"].CopyFrom(quantize_node.attr["quantization_axis"]) if "Tin" in uniform_quantize_node.attr: @@ -139,7 +138,7 @@ def do_transformation(self): self.graph_analyzer.add_node(zero_point_node, None, [uniform_quantize_node.name]) self.graph_analyzer.add_node(scale_node, None, [uniform_quantize_node.name]) - + quantize_output_node_name = set() for node_name in self.graph_info[quantize_node_name].outputs: quantize_output_node_name.add(node_name) @@ -165,5 +164,4 @@ def do_transformation(self): self.graph_analyzer.remove_node(quantize_node_name) self.graph_analyzer.remove_node(dequantize_node_name) - return self.graph_analyzer.dump_graph() diff --git a/neural_compressor/tensorflow/quantization/utils/graph_util.py b/neural_compressor/tensorflow/quantization/utils/graph_util.py index d2a744d75a2..2e6a745366e 100644 --- a/neural_compressor/tensorflow/quantization/utils/graph_util.py +++ b/neural_compressor/tensorflow/quantization/utils/graph_util.py @@ -541,8 +541,6 @@ def replace_single_node( self.node_name_details[each_input_node_name].node.ClearField("input") self.node_name_details[each_input_node_name].node.input.extend(new_input_name) - - def replace_node(self, new_node, old_node_name, output_nodes_name): """Replace the node into the internal data structure node_name_details. @@ -707,7 +705,6 @@ def parse_graph(self, input_graph_def=None): for each_input in node_details.node.input: self.node_name_details[GraphRewriterHelper.node_name_from_input(each_input)].outputs.append(node_name) - return self.node_name_details diff --git a/neural_compressor/tensorflow/utils/model.py b/neural_compressor/tensorflow/utils/model.py index 0e03d753569..1bf4e6cc259 100644 --- a/neural_compressor/tensorflow/utils/model.py +++ b/neural_compressor/tensorflow/utils/model.py @@ -111,9 +111,8 @@ def set_tf_config(conf, model): if conf and "workspace_path" in conf: config["workspace_path"] = conf["workspace_path"] if conf and "recipes" in conf: - config["recipes"] = conf["recipes"] - + config["recipes"] = conf["recipes"] for item in ["scale_propagation_max_pooling", "scale_propagation_concat"]: if "recipes" in config and item not in config["recipes"]: - config["recipes"].update({item: True}) \ No newline at end of file + config["recipes"].update({item: True}) diff --git a/neural_compressor/tensorflow/utils/model_wrappers.py b/neural_compressor/tensorflow/utils/model_wrappers.py index 1a753a501bd..6747805a79b 100644 --- a/neural_compressor/tensorflow/utils/model_wrappers.py +++ b/neural_compressor/tensorflow/utils/model_wrappers.py @@ -546,6 +546,7 @@ def try_loading_keras(model, input_tensor_names, output_tensor_names): # pragma shutil.rmtree(temp_dir, True) return graph_def_session(graph_def, input_names, output_names, **kwargs) + def keras_session(model, input_tensor_names, output_tensor_names, **kwargs): """Build session with keras model. @@ -573,6 +574,7 @@ def keras_session(model, input_tensor_names, output_tensor_names, **kwargs): return graph_def_session(graph_def, input_names, output_names, **kwargs) + def slim_session(model, input_tensor_names, output_tensor_names, **kwargs): # pragma: no cover """Build session with slim model. 
@@ -1302,6 +1304,7 @@ def adjust_weight(self, graph_def): self.model_path = os.path.abspath(os.path.expanduser(self.model_path)) if os.path.exists(self.model_path): import shutil + shutil.rmtree(self.model_path) os.makedirs(self.model_path, exist_ok=True) @@ -1310,8 +1313,8 @@ def adjust_weight(self, graph_def): if not self._sq_weight_scale_dict: self._auto_trackable = model - return - + return + for idx, weight_tensor in enumerate(model.variables): parsed_weight_name = self.weight_name_mapping(weight_tensor.name) if parsed_weight_name in self.sq_weight_scale_dict: @@ -1378,7 +1381,6 @@ def model(self): if self._keras_model: return self._keras_model - root = DEFAULT_WORKSPACE + "/saved_model" root = os.path.abspath(os.path.expanduser(root)) if os.path.exists(root): @@ -1388,7 +1390,7 @@ def model(self): self._load_sess(self._model, **self.kwargs) _, builder = self.build_saved_model(root) builder.save() - self._keras_model = self._build_as_functional_model(root) + self._keras_model = self._build_as_functional_model(root) shutil.rmtree(root) return self._keras_model From af3910ac665afd9109db87f2dc26b74c8fca6676 Mon Sep 17 00:00:00 2001 From: "Gao, Qun" Date: Mon, 7 Apr 2025 11:19:11 -0700 Subject: [PATCH 23/25] Update to address Leon's feedback Signed-off-by: Gao, Qun --- neural_compressor/data/datasets/dataset.py | 1 - neural_compressor/strategy/strategy.py | 1 - neural_compressor/tensorflow/utils/utility.py | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/neural_compressor/data/datasets/dataset.py b/neural_compressor/data/datasets/dataset.py index 7b32cdfbbc9..46cec9ba36a 100644 --- a/neural_compressor/data/datasets/dataset.py +++ b/neural_compressor/data/datasets/dataset.py @@ -1094,7 +1094,6 @@ def __new__(cls, root, transform=None, filter=None): """Build a new object of TensorflowImageRecord class.""" from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module - print(root) glob_pattern = os.path.join(root, "*-*-of-*") file_names = gfile.Glob(glob_pattern) if not file_names: diff --git a/neural_compressor/strategy/strategy.py b/neural_compressor/strategy/strategy.py index 7e48c9a7668..2fe3089430d 100644 --- a/neural_compressor/strategy/strategy.py +++ b/neural_compressor/strategy/strategy.py @@ -517,7 +517,6 @@ def traverse(self): self.algo_scheduler.reset_exec_algorithms() assert self.last_qmodel # return the last quantized model as a result. if not tune. 
- # self._not_tuning = True if self._not_tuning: self.best_qmodel = self.last_qmodel self._add_tuning_history(copy.deepcopy(tune_cfg), (-1, [0]), q_config=self.last_qmodel.q_config) diff --git a/neural_compressor/tensorflow/utils/utility.py b/neural_compressor/tensorflow/utils/utility.py index 4cb56a0b13e..44a7af398a1 100644 --- a/neural_compressor/tensorflow/utils/utility.py +++ b/neural_compressor/tensorflow/utils/utility.py @@ -100,7 +100,7 @@ def deep_get(dictionary, keys, default=None): def itex_installed(): """Check if the IntelĀ® Extension for TensorFlow has been installed.""" try: - # import intel_extension_for_tensorflow + import intel_extension_for_tensorflow return True except: From 9a7b86f0fe9c852db4f7577cbee3239f91e13eb6 Mon Sep 17 00:00:00 2001 From: "Gao, Qun" Date: Fri, 18 Apr 2025 13:06:41 -0700 Subject: [PATCH 24/25] Add missing pydantic for UT pass Signed-off-by: Gao, Qun --- test/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test/requirements.txt b/test/requirements.txt index 0c117db3d86..bc2dbb88e83 100644 --- a/test/requirements.txt +++ b/test/requirements.txt @@ -18,3 +18,4 @@ torch torchvision transformers<=4.50.0 xgboost<=2.1.4 +pydantic \ No newline at end of file From dec25bd95d34885917159f2714fe4ff6a699321c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 18 Apr 2025 20:09:31 +0000 Subject: [PATCH 25/25] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- test/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/requirements.txt b/test/requirements.txt index bc2dbb88e83..37b99f5aac2 100644 --- a/test/requirements.txt +++ b/test/requirements.txt @@ -11,6 +11,7 @@ onnxruntime onnxruntime-extensions; python_version < '3.11' optimum<=1.24.0 peft<=0.14.0 +pydantic tensorflow-addons<=0.23.0 tf2onnx<=1.16.1 tf_slim<=1.1.0 @@ -18,4 +19,3 @@ torch torchvision transformers<=4.50.0 xgboost<=2.1.4 -pydantic \ No newline at end of file
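
Note on the QuantizeV2/Dequantize to UniformQuantize/UniformDequantize rewrite touched repeatedly above: the rewriter derives the `scales`/`zero_points` constants from the calibrated min/max pair and picks the per-channel `quantization_axis` from the consumer op (Conv2D uses 3, DepthwiseConv2dNative uses 2, MatMul uses 0 when `transpose_b` is set and 1 otherwise; per-tensor cases use -1, and any other weight consumer raises a ValueError). The snippet below is a standalone, simplified rendering of the per-tensor arithmetic used on the adaptor side; it is not the patched `_calculate_zp_and_scale` method itself, and the sample min/max values are made up.

    def zp_and_scale(min_value, max_value, dtype="s8"):
        """Per-tensor zero point and scale, following the adaptor-side rule."""
        if dtype == "s8":
            # symmetric signed int8: zero point stays at 0
            scale = max(abs(max_value), abs(min_value)) / 127.0
            return 0, scale
        # asymmetric uint8: full min..max range, shifted zero point
        scale = (max_value - min_value) / 255.0
        return -round(min_value / scale), scale

    print(zp_and_scale(-2.0, 6.0, "s8"))  # (0, ~0.0472)
    print(zp_and_scale(-2.0, 6.0, "u8"))  # (64, ~0.0314)
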