Headline
CVE-2023-25670: Merge pull request #59437 from Intel-tensorflow:amin/fix-qmatmul · tensorflow/tensorflow@8a47a39
TensorFlow is an open source platform for machine learning. Versions prior to 2.12.0 and 2.11.1 have a null pointer error in QuantizedMatMulWithBiasAndDequantize with MKL enabled. A fix is included in TensorFlow version 2.12.0 and version 2.11.1.
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #if defined(INTEL_MKL) && defined(ENABLE_MKL) #if defined(INTEL_MKL) #define EIGEN_USE_THREADS
#include <functional> @@ -64,10 +64,10 @@ TEST_F(QuantizedMatMulTest, Small_withBias) { AddInputFromArray<qint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<qint32>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: @@ -116,10 +116,10 @@ TEST_F(QuantizedMatMulTest, Small_withNegBias) { AddInputFromArray<qint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<qint32>(TensorShape({4}), {100, -200, 300, -400}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: @@ -178,10 +178,10 @@ TEST_F(QuantizedMatMulTest, Small_WithNegInp) { AddInputFromArray<qint8>(TensorShape({3, 2}), {1, 4, 2, 5, 3, 6}); // Bias AddInputFromArray<float>(TensorShape({2}), {10.0f, 20.0f}); AddInputFromArray<float>(TensorShape({1}), {-12.0f}); AddInputFromArray<float>(TensorShape({1}), {243.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {-12.0f}); AddInputFromArray<float>(TensorShape({}), {243.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel()); // First calculate C = A * B, @@ -240,12 +240,12 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndReq) { AddInputFromArray<qint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: @@ -308,12 +308,12 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndDeq) { AddInputFromArray<qint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: @@ -375,10 +375,10 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndRelu) { {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<float>(TensorShape({4}), {100.0f, -200.0f, 300.0f, -400.0f}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: @@ -431,12 +431,12 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndReluAndReq) { AddInputFromArray<qint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: @@ -502,10 +502,10 @@ TEST_F(QuantizedMatMulTest, Small_withWeightCached) { AddInputFromArray<qint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<qint32>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); AddInputFromArray<float>(TensorShape({1}), {-127.0f}); AddInputFromArray<float>(TensorShape({1}), {127.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {-127.0f}); AddInputFromArray<float>(TensorShape({}), {127.0f});
int64 start_time = Env::Default()->NowMicros(); TF_ASSERT_OK(RunOpKernel()); @@ -543,4 +543,4 @@ TEST_F(QuantizedMatMulTest, Small_withWeightCached) {
} // namespace tensorflow
#endif // INTEL_MKL && ENABLE_MKL #endif // INTEL_MKL
Related news
Vulnerability in the Oracle Hyperion Financial Reporting product of Oracle Hyperion (component: Repository). The supported version that is affected is 11.2.13.0.000. Easily exploitable vulnerability allows low privileged attacker with network access via HTTP to compromise Oracle Hyperion Financial Reporting. While the vulnerability is in Oracle Hyperion Financial Reporting, attacks may significantly impact additional products (scope change). Successful attacks of this vulnerability can result in unauthorized access to critical data or complete access to all Oracle Hyperion Financial Reporting accessible data and unauthorized ability to cause a partial denial of service (partial DOS) of Oracle Hyperion Financial Reporting. CVSS 3.1 Base Score 8.5 (Confidentiality and Availability impacts). CVSS Vector: (CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:C/C:H/I:N/A:L).
### Impact NPE in QuantizedMatMulWithBiasAndDequantize with MKL enabled ```python import tensorflow as tf func = tf.raw_ops.QuantizedMatMulWithBiasAndDequantize para={'a': tf.constant(138, dtype=tf.quint8), 'b': tf.constant(4, dtype=tf.qint8), 'bias': [[31.81644630432129, 47.21876525878906], [109.95201110839844, 152.07968139648438]], 'min_a': 141.5337138686371, 'max_a': [73.84139251708984, 173.15280151367188], 'min_b': [], 'max_b': [[16.128345489501953, 193.26820373535156]], 'min_freezed_output': [], 'max_freezed_output': [115.50032806396484, 156.974853515625], 'Toutput': 1.0, 'transpose_a': True, 'transpose_b': False, 'input_quant_mode': 'MIN_FIRST'} func(**para) ``` ### Patches We have patched the issue in GitHub commit [8a47a39d9697969206d23a523c977238717e8727](https://github.com/tensorflow/tensorflow/commit/8a47a39d9697969206d23a523c977238717e8727). The fix will be included in TensorFlow 2.12.0. We will also cherry-pick this commit on TensorFlow 2.11.1 ### For more information...