CVE-2022-35937: tensorflow/gather_nd.cc at f463040eb3997e42e60a2ffc6dc72de7ef11dbb4 · tensorflow/tensorflow
TensorFlow is an open source platform for machine learning. The GatherNd
function takes arguments that determine the sizes of inputs and outputs. If the inputs given are greater than or equal to the sizes of the outputs, an out-of-bounds memory read is triggered. This issue has been patched in GitHub commit 595a65a3e224a0362d7e68c2213acfc2b499a196. The fix will be included in TensorFlow 2.10.0. We will also cherrypick this commit on TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these are also affected and still in supported range. There are no known workarounds for this issue.
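To make the failure mode concrete, the sketch below (not TFLite's `reference_ops::GatherNd`; the function and variable names are illustrative only) shows how an n-d gather turns one index row into a flat offset into `params`. Because each coordinate is only checked for being non-negative, a coordinate at or past the size of the matching dimension yields an offset beyond the end of the `params` buffer.

```c++
// Minimal, self-contained sketch of how an n-d gather turns an index row into
// a flat offset. This is NOT TFLite's reference_ops::GatherNd; all names here
// are illustrative.
#include <cstdint>
#include <cstdio>
#include <vector>

// Row-major flat offset of one index row into a tensor with dims
// `params_dims`, using the first `index_depth` dimensions.
int FlatOffset(const int32_t* index_row, const std::vector<int>& params_dims,
               int index_depth) {
  int offset = 0;
  for (int d = 0; d < index_depth; ++d) {
    // No check that index_row[d] < params_dims[d]: a coordinate at or past
    // the dimension size produces an offset beyond the params buffer.
    offset = offset * params_dims[d] + index_row[d];
  }
  return offset;
}

int main() {
  // params has shape [2, 3]: 6 floats, valid outer indices are 0 and 1.
  const std::vector<float> params = {0, 1, 2, 3, 4, 5};
  const std::vector<int> params_dims = {2, 3};
  const int slice_size = 3;  // elements copied per gathered row

  // The index {2} is non-negative, so a sign-only check (like the one in
  // EvalGatherNd in the kernel source below) passes, yet it names an outer
  // row that does not exist.
  const int32_t bad_index[] = {2};
  const int offset = FlatOffset(bad_index, params_dims, /*index_depth=*/1);

  std::printf(
      "unchecked gather would read params[%d..%d], but params has %zu elements\n",
      offset * slice_size, offset * slice_size + slice_size - 1, params.size());
}
```

For reference, the affected kernel source at commit f463040eb3997e42e60a2ffc6dc72de7ef11dbb4 follows.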
```c++
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdint.h>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace gather_nd {
constexpr int kParams = 0;
constexpr int kIndices = 1;
constexpr int kOutputTensor = 0;

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* params;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params));
  const TfLiteTensor* indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  switch (params->type) {
    case kTfLiteFloat32:
    case kTfLiteUInt8:
    case kTfLiteInt8:
    case kTfLiteInt16:
    case kTfLiteInt64:
    case kTfLiteInt32:
    case kTfLiteString:
      break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Params of type '%s' are not supported by gather_nd.",
                         TfLiteTypeGetName(params->type));
      return kTfLiteError;
  }
  switch (indices->type) {
    case kTfLiteInt64:
    case kTfLiteInt32:
      break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Indices of type '%s' are not supported by gather_nd.",
                         TfLiteTypeGetName(indices->type));
      return kTfLiteError;
  }

  const int params_rank = NumDimensions(params);
  const int indices_rank = NumDimensions(indices);
  const int indices_nd = SizeOfDimension(indices, indices_rank - 1);
  if (params_rank < 1) {
    TF_LITE_KERNEL_LOG(context, "Params must be at least a vector.");
    return kTfLiteError;
  }
  if (indices_rank < 1) {
    TF_LITE_KERNEL_LOG(context, "Indices must be at least a vector.");
    return kTfLiteError;
  }
  if (indices_nd > params_rank) {
    TF_LITE_KERNEL_LOG(
        context, "Index innermost dimension length must be <= params rank.");
    return kTfLiteError;
  }

  // Assign to output the input type.
  output->type = params->type;

  // The result shape is
  // indices.shape[:-1] + params.shape[indices.shape[-1]:]
  const int output_rank = indices_rank + params_rank - indices_nd - 1;
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
  int output_index = 0;
  for (int i = 0; i < indices_rank - 1; ++i) {
    output_shape->data[output_index++] = indices->dims->data[i];
  }
  for (int i = indices_nd; i < params_rank; ++i) {
    output_shape->data[output_index++] = params->dims->data[i];
  }
  return context->ResizeTensor(context, output, output_shape);
}

template <typename ParamsT, typename IndicesT>
TfLiteStatus GatherNd(const TfLiteTensor* params, const TfLiteTensor* indices,
                      TfLiteTensor* output) {
  reference_ops::GatherNd(
      GetTensorShape(params), GetTensorData<ParamsT>(params),
      GetTensorShape(indices), GetTensorData<IndicesT>(indices),
      GetTensorShape(output), GetTensorData<ParamsT>(output));
  return kTfLiteOk;
}

template <typename IndicesT>
TfLiteStatus GatherNdString(const TfLiteTensor* params,
                            const TfLiteTensor* indices, TfLiteTensor* output) {
  reference_ops::GatherNdString(
      GetTensorShape(params), params, GetTensorShape(indices),
      GetTensorData<IndicesT>(indices), GetTensorShape(output), output);
  return kTfLiteOk;
}

template <typename IndicesT>
TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,
                          const TfLiteTensor* indices, TfLiteTensor* output) {
  bool indices_has_only_positive_elements = true;
  const auto* indices_values = GetTensorData<IndicesT>(indices);
  const size_t num_indices = indices->bytes / sizeof(IndicesT);
  for (size_t i = 0; i < num_indices; i++) {
    if (indices_values[i] < 0) {
      indices_has_only_positive_elements = false;
      break;
    }
  }
  TF_LITE_ENSURE(context, indices_has_only_positive_elements);

  switch (params->type) {
    case kTfLiteFloat32:
      return GatherNd<float, IndicesT>(params, indices, output);
    case kTfLiteUInt8:
      return GatherNd<uint8_t, IndicesT>(params, indices, output);
    case kTfLiteInt8:
      return GatherNd<int8_t, IndicesT>(params, indices, output);
    case kTfLiteInt16:
      return GatherNd<int16_t, IndicesT>(params, indices, output);
    case kTfLiteInt32:
      return GatherNd<int32_t, IndicesT>(params, indices, output);
    case kTfLiteInt64:
      return GatherNd<int64_t, IndicesT>(params, indices, output);
    case kTfLiteString:
      return GatherNdString<IndicesT>(params, indices, output);
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Params type '%s' are not supported by gather_nd.",
                         TfLiteTypeGetName(params->type));
      return kTfLiteError;
  }
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* params;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params));
  const TfLiteTensor* indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  // Prevent division by 0 in the helper.
  // In TF, GatherND supports empty `params` only when `indices` is also empty.
  TF_LITE_ENSURE(context,
                 (NumElements(params) == 0 && NumElements(indices) == 0) ||
                     NumElements(params) > 0);

  switch (indices->type) {
    case kTfLiteInt32:
      return EvalGatherNd<int32_t>(context, params, indices, output);
    case kTfLiteInt64:
      return EvalGatherNd<int64_t>(context, params, indices, output);
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Indices of type '%s' are not supported by gather_nd.",
                         TfLiteTypeGetName(indices->type));
      return kTfLiteError;
  }
}
}  // namespace gather_nd

TfLiteRegistration* Register_GATHER_ND() {
  static TfLiteRegistration r = {/*init*/ nullptr, /*free*/ nullptr,
                                 gather_nd::Prepare, gather_nd::Eval};
  return &r;
}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite
```
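Note that `EvalGatherNd` above only rejects negative index values; nothing bounds each coordinate from above before `reference_ops::GatherNd` is called. The shipped fix is in commit 595a65a3e224a0362d7e68c2213acfc2b499a196; the sketch below is only an illustration of the kind of per-coordinate validation that closes this class of bug, not the patch itself. The function name `IndicesInBounds` and the flat `indices`/`params_dims` layout are hypothetical.

```c++
// Illustrative only: a per-coordinate bounds predicate of the kind that
// prevents out-of-bounds gathers. This is NOT the patch from commit
// 595a65a3e224a0362d7e68c2213acfc2b499a196.
#include <cstdint>
#include <vector>

// Returns true iff every coordinate of every index row lies inside the
// matching dimension of the params shape.
bool IndicesInBounds(const std::vector<int32_t>& indices,
                     const std::vector<int>& params_dims, int index_depth) {
  const int num_rows = static_cast<int>(indices.size()) / index_depth;
  for (int row = 0; row < num_rows; ++row) {
    for (int d = 0; d < index_depth; ++d) {
      const int32_t coord = indices[row * index_depth + d];
      // Reject both negative coordinates and coordinates at or past the
      // corresponding dimension size.
      if (coord < 0 || coord >= params_dims[d]) return false;
    }
  }
  return true;
}

int main() {
  const std::vector<int> params_dims = {2, 3};  // params shape [2, 3]
  const std::vector<int32_t> ok = {1, 2};       // in range
  const std::vector<int32_t> bad = {2, 0};      // first coordinate == dim size
  const bool checks_pass =
      IndicesInBounds(ok, params_dims, /*index_depth=*/2) &&
      !IndicesInBounds(bad, params_dims, /*index_depth=*/2);
  return checks_pass ? 0 : 1;
}
```

In a TFLite kernel, a predicate like this would typically be enforced with `TF_LITE_ENSURE(context, ...)` before dispatching to `reference_ops::GatherNd`, mirroring the existing negative-element check in `EvalGatherNd`; the structure of the actual fix may differ.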
### Impact

The [`GatherNd`](https://github.com/tensorflow/tensorflow/blob/f463040eb3997e42e60a2ffc6dc72de7ef11dbb4/tensorflow/lite/kernels/gather_nd.cc#L105-L111) function takes arguments that determine the sizes of inputs and outputs. If the inputs given are greater than or equal to the sizes of the outputs, an out-of-bounds memory read is triggered.

### Patches

We have patched the issue in GitHub commit [595a65a3e224a0362d7e68c2213acfc2b499a196](https://github.com/tensorflow/tensorflow/commit/595a65a3e224a0362d7e68c2213acfc2b499a196).

The fix will be included in TensorFlow 2.10.0. We will also cherrypick this commit on TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these are also affected and still in supported range.

### For more information

Please consult [our security guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for more information regarding the security model and how to contact us with issues and questions.

### Attribution

This vu...