Headline
CVE-2022-36027: Skip reordering dq-q patterns when the new quantization dimension is … · tensorflow/tensorflow@aa0b852
TensorFlow is an open source platform for machine learning. When converting transposed convolutions using per-channel weight quantization the converter segfaults and crashes the Python process. We have patched the issue in GitHub commit aa0b852a4588cea4d36b74feb05d93055540b450. The fix will be included in TensorFlow 2.10.0. We will also cherrypick this commit on TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these are also affected and still in supported range. There are no known workarounds for this issue.
@@ -2311,6 +2311,44 @@ def testKerasFullyConnectedOutputShape3D(self):
list(output_details[0]['shape_signature']),
list(model.layers[-1].output_shape))
@test_util.run_v2_only
def testKerasConv2DTransposedWithMismatchQuantizedAxes(self):
  """Regression test for CVE-2022-36027.

  Converting a transposed convolution whose per-channel-quantized weight
  tensor has been transposed (so the quantized axis no longer matches the
  axis the converter expects) used to segfault the Python process. The
  fixed converter must instead raise a ConverterError with a clear
  'mismatched quantized axes' message.
  """

  class QuantConv2DTransposed(tf.keras.layers.Layer):
    """Layer that per-channel fake-quantizes its kernel on the last axis,
    then transposes the kernel so that axis no longer lines up."""

    def build(self, input_shape):
      # Kernel shape [H, W, in_channels, 24]; the per-channel quant
      # min/max below are defined over the last axis (size 24).
      self.kernel = self.add_weight('kernel', [3, 3, input_shape[-1], 24])

    def call(self, inputs):
      filters = tf.quantization.fake_quant_with_min_max_vars_per_channel(
          self.kernel,
          -3.0 * tf.ones([24]),
          3.0 * tf.ones([24]),
          narrow_range=True)
      # Swapping the last two axes moves the quantized axis away from the
      # dimension conv2d_transpose treats as output channels — this
      # mismatch is what crashed the converter before the fix.
      filters = tf.transpose(filters, (0, 1, 3, 2))
      return tf.nn.conv2d_transpose(inputs, filters,
                                    [*inputs.shape[:-1], 24], 1)

  inp = tf.keras.Input(shape=(6, 8, 48), batch_size=1)
  x = tf.quantization.fake_quant_with_min_max_vars(
      inp, -3.0, 3.0, narrow_range=True)
  x = QuantConv2DTransposed()(x)
  x = tf.quantization.fake_quant_with_min_max_vars(
      x, -3.0, 3.0, narrow_range=True)
  model = tf.keras.Model(inp, x)
  saved_model_dir = os.path.join(self.get_temp_dir(),
                                 'keras_conv2d_transpose')
  model.save(saved_model_dir)

  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
  converter.optimizations = [tf.lite.Optimize.DEFAULT]
  # The converter must reject the model with an explicit error instead of
  # segfaulting.
  with self.assertRaises(convert.ConverterError) as error:
    _ = converter.convert()
  self.assertIn('mismatched quantized axes of input and output',
                str(error.exception))
def _createModelWithInputShape(self, shape):
“""Create a simple SavedModel with a certain shape.""”
saved_model_dir = os.path.join(self.get_temp_dir(), ‘input_shape_model’)
Related news
### Impact When converting transposed convolutions using per-channel weight quantization the converter segfaults and crashes the Python process. ```python import tensorflow as tf class QuantConv2DTransposed(tf.keras.layers.Layer): def build(self, input_shape): self.kernel = self.add_weight("kernel", [3, 3, input_shape[-1], 24]) def call(self, inputs): filters = tf.quantization.fake_quant_with_min_max_vars_per_channel( self.kernel, -3.0 * tf.ones([24]), 3.0 * tf.ones([24]), narrow_range=True ) filters = tf.transpose(filters, (0, 1, 3, 2)) return tf.nn.conv2d_transpose(inputs, filters, [*inputs.shape[:-1], 24], 1) inp = tf.keras.Input(shape=(6, 8, 48), batch_size=1) x = tf.quantization.fake_quant_with_min_max_vars(inp, -3.0, 3.0, narrow_range=True) x = QuantConv2DTransposed()(x) x = tf.quantization.fake_quant_with_min_max_vars(x, -3.0, 3.0, narrow_range=True) model = tf.keras.Model(inp, x) model.save("/tmp/testing") convert...