[Security] Fix security vulnerability issues 25/290425/1
authorSangjung Woo <sangjung.woo@samsung.com>
Tue, 21 Mar 2023 07:37:13 +0000 (16:37 +0900)
committerSangjung Woo <sangjung.woo@samsung.com>
Fri, 24 Mar 2023 09:50:01 +0000 (18:50 +0900)
This patch fixes the following security vulnerabilities:

* CVE-2020-15203
* CVE-2022-29206
* CVE-2022-29201
* CVE-2020-15208

Change-Id: I811d70501151d5a6a813e817e2418ec53cd42445
Signed-off-by: Sangjung Woo <sangjung.woo@samsung.com>
packaging/CVE-2020-15203.patch [new file with mode: 0644]
packaging/CVE-2020-15208.patch [new file with mode: 0644]
packaging/CVE-2022-29201.patch [new file with mode: 0644]
packaging/CVE-2022-29206.patch [new file with mode: 0644]
packaging/tensorflow.spec

diff --git a/packaging/CVE-2020-15203.patch b/packaging/CVE-2020-15203.patch
new file mode 100644 (file)
index 0000000..d007af3
--- /dev/null
@@ -0,0 +1,334 @@
+From 33be22c65d86256e6826666662e40dbdfe70ee83 Mon Sep 17 00:00:00 2001
+From: Mihai Maruseac <mihaimaruseac@google.com>
+Date: Fri, 18 Sep 2020 16:54:17 -0700
+Subject: [PATCH] Prevent format string vulnerability in
+ `tf.strings.as_string`.
+
+The `printf` format specifier only allows `#`, `0`, `-`, `+` and space as flag characters. Others are interpreted as width/precision/length modifiers or conversion specifiers. If a character does not fit into any of these sets, `printf` just displays it.
+
+Also add a test suite for `tf.strings.as_string`, and fix the issue where the flag character was used only when a width was specified.
+
+PiperOrigin-RevId: 332553548
+Change-Id: Ie57cf2a7c14d1a36097642794c14329db669bbba
+---
+ tensorflow/core/kernels/BUILD                |  18 ++
+ tensorflow/core/kernels/as_string_op.cc      |  19 +-
+ tensorflow/core/kernels/as_string_op_test.cc | 245 +++++++++++++++++++
+ 3 files changed, 281 insertions(+), 1 deletion(-)
+ create mode 100644 tensorflow/core/kernels/as_string_op_test.cc
+
+diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
+index a5781701599..9671c5a621e 100644
+--- a/tensorflow/core/kernels/BUILD
++++ b/tensorflow/core/kernels/BUILD
+@@ -5228,6 +5228,24 @@ tf_kernel_library(
+     deps = STRING_DEPS,
+ )
++tf_cc_test(
++    name = "as_string_op_test",
++    size = "small",
++    srcs = ["as_string_op_test.cc"],
++    deps = [
++        ":as_string_op",
++        ":ops_testutil",
++        ":ops_util",
++        "//tensorflow/core:core_cpu",
++        "//tensorflow/core:framework",
++        "//tensorflow/core:lib",
++        "//tensorflow/core:protos_all_cc",
++        "//tensorflow/core:test",
++        "//tensorflow/core:test_main",
++        "//tensorflow/core:testlib",
++    ],
++)
++
+ tf_kernel_library(
+     name = "unicode_ops",
+     prefix = "unicode_ops",
+diff --git a/tensorflow/core/kernels/as_string_op.cc b/tensorflow/core/kernels/as_string_op.cc
+index 8341909fbc8..b9af976a654 100644
+--- a/tensorflow/core/kernels/as_string_op.cc
++++ b/tensorflow/core/kernels/as_string_op.cc
+@@ -65,9 +65,26 @@ class AsStringOp : public OpKernel {
+     OP_REQUIRES(ctx, !(scientific && shortest),
+                 errors::InvalidArgument(
+                     "Cannot select both scientific and shortest notation"));
++
+     format_ = "%";
++    if (!fill_string.empty()) {
++      switch (fill_string[0]) {
++        case ' ':
++        case '+':
++        case '-':
++        case '0':
++        case '#':
++          strings::Appendf(&format_, "%s", fill_string.c_str());
++          break;
++        default:
++          bool fill_not_supported = true;
++          OP_REQUIRES(ctx, !fill_not_supported,
++                      errors::InvalidArgument("Fill argument not supported: \"",
++                                              fill_string, "\""));
++      }
++    }
+     if (width > -1) {
+-      strings::Appendf(&format_, "%s%d", fill_string.c_str(), width);
++      strings::Appendf(&format_, "%d", width);
+     }
+     if (precision > -1) {
+       strings::Appendf(&format_, ".%d", precision);
+diff --git a/tensorflow/core/kernels/as_string_op_test.cc b/tensorflow/core/kernels/as_string_op_test.cc
+new file mode 100644
+index 00000000000..dff78e25e72
+--- /dev/null
++++ b/tensorflow/core/kernels/as_string_op_test.cc
+@@ -0,0 +1,245 @@
++/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++    http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++==============================================================================*/
++
++#include "tensorflow/core/framework/fake_input.h"
++#include "tensorflow/core/framework/node_def_builder.h"
++#include "tensorflow/core/framework/tensor.h"
++#include "tensorflow/core/framework/tensor_testutil.h"
++#include "tensorflow/core/framework/types.h"
++#include "tensorflow/core/kernels/ops_testutil.h"
++#include "tensorflow/core/kernels/ops_util.h"
++#include "tensorflow/core/lib/core/status_test_util.h"
++
++namespace tensorflow {
++namespace {
++
++class AsStringGraphTest : public OpsTestBase {
++ protected:
++  Status Init(DataType input_type, const string& fill = "", int width = -1,
++              int precision = -1, bool scientific = false,
++              bool shortest = false) {
++    TF_CHECK_OK(NodeDefBuilder("op", "AsString")
++                    .Input(FakeInput(input_type))
++                    .Attr("fill", fill)
++                    .Attr("precision", precision)
++                    .Attr("scientific", scientific)
++                    .Attr("shortest", shortest)
++                    .Attr("width", width)
++                    .Finalize(node_def()));
++    return InitOp();
++  }
++};
++
++TEST_F(AsStringGraphTest, Int8) {
++  TF_ASSERT_OK(Init(DT_INT8));
++
++  AddInputFromArray<int8>(TensorShape({3}), {-42, 0, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({3}));
++  test::FillValues<tstring>(&expected, {"-42", "0", "42"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, Int64) {
++  TF_ASSERT_OK(Init(DT_INT64));
++
++  AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({3}));
++  test::FillValues<tstring>(&expected, {"-42", "0", "42"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FloatDefault) {
++  TF_ASSERT_OK(Init(DT_FLOAT));
++
++  AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({4}));
++  test::FillValues<tstring>(
++      &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FloatScientific) {
++  TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
++                    /*scientific=*/true));
++
++  AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({4}));
++  test::FillValues<tstring>(&expected, {"-4.200000e+01", "0.000000e+00",
++                                        "3.141590e+00", "4.200000e+01"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FloatShortest) {
++  TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
++                    /*scientific=*/false, /*shortest=*/true));
++
++  AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({4}));
++  test::FillValues<tstring>(&expected, {"-42", "0", "3.14159", "42"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FloatPrecisionOnly) {
++  TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/2));
++
++  AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({4}));
++  test::FillValues<tstring>(&expected, {"-42.00", "0.00", "3.14", "42.00"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FloatWidthOnly) {
++  TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5));
++
++  AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({4}));
++  test::FillValues<tstring>(
++      &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, Float_5_2_Format) {
++  TF_ASSERT_OK(Init(DT_FLOAT, /*fill=*/"", /*width=*/5, /*precision=*/2));
++
++  AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({4}));
++  test::FillValues<tstring>(&expected, {"-42.00", " 0.00", " 3.14", "42.00"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, Complex) {
++  TF_ASSERT_OK(Init(DT_COMPLEX64, /*fill=*/"", /*width=*/5, /*precision=*/2));
++
++  AddInputFromArray<complex64>(TensorShape({3}), {{-4, 2}, {0}, {3.14159, -1}});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({3}));
++  test::FillValues<tstring>(
++      &expected, {"(-4.00, 2.00)", "( 0.00, 0.00)", "( 3.14,-1.00)"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, Bool) {
++  TF_ASSERT_OK(Init(DT_BOOL));
++
++  AddInputFromArray<bool>(TensorShape({2}), {true, false});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({2}));
++  test::FillValues<tstring>(&expected, {"true", "false"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, String) {
++  Status s = Init(DT_STRING);
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(absl::StrContains(
++      s.error_message(),
++      "Value for attr 'T' of string is not in the list of allowed values"));
++}
++
++TEST_F(AsStringGraphTest, OnlyOneOfScientificAndShortest) {
++  Status s = Init(DT_FLOAT, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
++                  /*scientific=*/true, /*shortest=*/true);
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(
++      absl::StrContains(s.error_message(),
++                        "Cannot select both scientific and shortest notation"));
++}
++
++TEST_F(AsStringGraphTest, NoShortestForNonFloat) {
++  Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
++                  /*scientific=*/false, /*shortest=*/true);
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(absl::StrContains(
++      s.error_message(),
++      "scientific and shortest format not supported for datatype"));
++}
++
++TEST_F(AsStringGraphTest, NoScientificForNonFloat) {
++  Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/-1,
++                  /*scientific=*/true);
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(absl::StrContains(
++      s.error_message(),
++      "scientific and shortest format not supported for datatype"));
++}
++
++TEST_F(AsStringGraphTest, NoPrecisionForNonFloat) {
++  Status s = Init(DT_INT32, /*fill=*/"", /*width=*/-1, /*precision=*/5);
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(absl::StrContains(s.error_message(),
++                                "precision not supported for datatype"));
++}
++
++TEST_F(AsStringGraphTest, LongFill) {
++  Status s = Init(DT_INT32, /*fill=*/"asdf");
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(absl::StrContains(s.error_message(),
++                                "Fill string must be one or fewer characters"));
++}
++
++TEST_F(AsStringGraphTest, FillWithZero) {
++  TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"0", /*width=*/4));
++
++  AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({3}));
++  test::FillValues<tstring>(&expected, {"-042", "0000", "0042"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FillWithSpace) {
++  TF_ASSERT_OK(Init(DT_INT64, /*fill=*/" ", /*width=*/4));
++
++  AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({3}));
++  test::FillValues<tstring>(&expected, {" -42", "   0", "  42"});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FillWithChar1) {
++  TF_ASSERT_OK(Init(DT_INT64, /*fill=*/"-", /*width=*/4));
++
++  AddInputFromArray<int64>(TensorShape({3}), {-42, 0, 42});
++  TF_ASSERT_OK(RunOpKernel());
++  Tensor expected(allocator(), DT_STRING, TensorShape({3}));
++  test::FillValues<tstring>(&expected, {"-42 ", "0   ", "42  "});
++  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
++}
++
++TEST_F(AsStringGraphTest, FillWithChar3) {
++  Status s = Init(DT_INT32, /*fill=*/"s");
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(
++      absl::StrContains(s.error_message(), "Fill argument not supported"));
++}
++
++TEST_F(AsStringGraphTest, FillWithChar4) {
++  Status s = Init(DT_INT32, /*fill=*/"n");
++  ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
++  ASSERT_TRUE(
++      absl::StrContains(s.error_message(), "Fill argument not supported"));
++}
++
++}  // end namespace
++}  // end namespace tensorflow
+-- 
+2.25.1
+
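For illustration, here is a minimal, standalone C++ sketch (not part of the patch above) of the idea behind the CVE-2020-15203 fix; the helper names are made up, while the real kernel uses `strings::Appendf` and `OP_REQUIRES`. Only the printf flag characters ' ', '+', '-', '0' and '#' may be copied from the user-controlled `fill` attribute into the format string; anything else is rejected up front, and the flag is now applied even when no width is requested.

#include <cstdio>
#include <stdexcept>
#include <string>

// Build a printf format string for a float, mirroring the patched logic.
std::string BuildFormat(const std::string& fill, int width, int precision) {
  std::string format = "%";
  if (!fill.empty()) {
    switch (fill[0]) {
      case ' ': case '+': case '-': case '0': case '#':
        format += fill[0];  // safe printf flag characters only
        break;
      default:
        throw std::invalid_argument("Fill argument not supported: " + fill);
    }
  }
  if (width > -1) format += std::to_string(width);  // width appended separately
  if (precision > -1) format += "." + std::to_string(precision);
  format += "f";
  return format;
}

int main() {
  char buf[64];
  std::snprintf(buf, sizeof(buf), BuildFormat("0", 8, 2).c_str(), 3.14159);
  std::printf("[%s]\n", buf);  // prints [00003.14]
  try {
    BuildFormat("s", 8, 2);    // a '%s'-style injection attempt is rejected
  } catch (const std::invalid_argument& e) {
    std::printf("rejected: %s\n", e.what());
  }
  return 0;
}
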
diff --git a/packaging/CVE-2020-15208.patch b/packaging/CVE-2020-15208.patch
new file mode 100644 (file)
index 0000000..f229474
--- /dev/null
@@ -0,0 +1,31 @@
+From 8ee24e7949a203d234489f9da2c5bf45a7d5157d Mon Sep 17 00:00:00 2001
+From: Mihai Maruseac <mihaimaruseac@google.com>
+Date: Fri, 18 Sep 2020 14:19:26 -0700
+Subject: [PATCH] [tflite] Ensure `MatchingDim` does not allow buffer overflow.
+
+We check in `MatchingDim` that both arguments have the same dimensionality; however, that check is a `DCHECK`, which is only enabled in debug builds. Hence, it could be possible to cause buffer overflows by passing in a tensor with larger dimensions as the second argument. To fix this, we now make `MatchingDim` return the minimum of the two sizes.
+
+A much better fix would be to return a status object but that requires refactoring a large part of the codebase for minor benefits.
+
+PiperOrigin-RevId: 332526127
+Change-Id: If627d0d2c80a685217b6e0d1e64b0872dbf1c5e4
+---
+ tensorflow/lite/kernels/internal/types.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tensorflow/lite/kernels/internal/types.h b/tensorflow/lite/kernels/internal/types.h
+index 9db742ddf03..b077686dc15 100644
+--- a/tensorflow/lite/kernels/internal/types.h
++++ b/tensorflow/lite/kernels/internal/types.h
+@@ -438,7 +438,7 @@ int MatchingArraySize(const ArrayType1& array1, int index1,
+ inline int MatchingDim(const RuntimeShape& shape1, int index1,
+                        const RuntimeShape& shape2, int index2) {
+   TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2));
+-  return shape1.Dims(index1);
++  return std::min(shape1.Dims(index1), shape2.Dims(index2));
+ }
+ template <typename... Args>
+-- 
+2.25.1
+
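A small standalone C++ sketch (not from the patch) of why this one-line change matters: `TFLITE_DCHECK_EQ` is compiled out in release builds, so a mismatched second shape could previously drive loops past the smaller buffer, whereas clamping the returned dimension with `std::min` keeps such loops in bounds. The `Shape` type below is a simplified stand-in for TFLite's `RuntimeShape`, not the real class.

#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for RuntimeShape: just a list of dimension sizes.
struct Shape {
  std::vector<int> dims;
  int Dims(int i) const { return dims[i]; }
};

// Patched behaviour: never trust that both shapes agree; clamp to the
// smaller extent so downstream loops cannot run past either buffer.
// (In debug builds the real code also DCHECKs that the two sizes match.)
int MatchingDimSafe(const Shape& a, int ia, const Shape& b, int ib) {
  return std::min(a.Dims(ia), b.Dims(ib));
}

int main() {
  Shape small_shape{{4}};
  Shape large_shape{{8}};
  std::vector<float> buf_small(4, 1.0f), buf_large(8, 1.0f);

  // A loop bounded by the clamped size stays inside both buffers even when
  // the caller passes inconsistent shapes (the scenario behind CVE-2020-15208).
  float acc = 0.0f;
  const int n = MatchingDimSafe(small_shape, 0, large_shape, 0);
  for (int i = 0; i < n; ++i) acc += buf_small[i] * buf_large[i];
  std::printf("dot over %d elements = %.1f\n", n, acc);  // 4 elements, 4.0
  return 0;
}
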
diff --git a/packaging/CVE-2022-29201.patch b/packaging/CVE-2022-29201.patch
new file mode 100644 (file)
index 0000000..9e974d3
--- /dev/null
@@ -0,0 +1,230 @@
+From 6d11da258aca86eef2ffce486234e7c717219cde Mon Sep 17 00:00:00 2001
+From: Antonio Sanchez <cantonios@google.com>
+Date: Fri, 29 Apr 2022 15:22:06 -0700
+Subject: [PATCH] Fix undefined behavior in QuantizedConv2D
+
+Added more input validation and tests. Prior to this, we could get
+`nullptr` dereferences when attempting to access the 0th element of 0-sized
+inputs, leading to security vulnerabilities.
+
+Also needed to modify `quantized_conv_ops_test.cc` for consistency.
+Previously the CPU kernel did technically support passing tensors
+of rank larger than 0 for min/max values.  However, the XLA kernels do not.
+
+PiperOrigin-RevId: 445518507
+---
+ tensorflow/core/kernels/quantized_conv_ops.cc | 24 +++++--
+ .../core/kernels/quantized_conv_ops_test.cc   | 40 +++++------
+ .../python/ops/quantized_conv_ops_test.py     | 67 +++++++++++++++++++
+ 3 files changed, 107 insertions(+), 24 deletions(-)
+
+diff --git a/tensorflow/core/kernels/quantized_conv_ops.cc b/tensorflow/core/kernels/quantized_conv_ops.cc
+index 5b3570edff..a9e3b36c81 100644
+--- a/tensorflow/core/kernels/quantized_conv_ops.cc
++++ b/tensorflow/core/kernels/quantized_conv_ops.cc
+@@ -30,6 +30,7 @@ limitations under the License.
+ #include "tensorflow/core/kernels/quantization_utils.h"
+ #include "tensorflow/core/kernels/reference_gemm.h"
+ #include "tensorflow/core/lib/core/errors.h"
++#include "tensorflow/core/platform/errors.h"
+ #include "tensorflow/core/util/padding.h"
+ namespace tensorflow {
+@@ -490,11 +491,26 @@ class QuantizedConv2DOp : public OpKernel {
+     // For 2D convolution, there should be 4 dimensions.
+     OP_REQUIRES(context, input.dims() == 4,
+-                errors::InvalidArgument("input must be 4-dimensional",
+-                                        input.shape().DebugString()));
++                errors::InvalidArgument("input must be rank 4 but is rank ",
++                                        input.shape().dims()));
+     OP_REQUIRES(context, filter.dims() == 4,
+-                errors::InvalidArgument("filter must be 4-dimensional: ",
+-                                        filter.shape().DebugString()));
++                errors::InvalidArgument("filter must be rank 4 but is rank ",
++                                        filter.shape().dims()));
++
++    OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(2).shape()),
++                errors::InvalidArgument("min_input must be rank 0 but is rank ",
++                                        context->input(2).shape().dims()));
++    OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(3).shape()),
++                errors::InvalidArgument("max_input must be rank 0 but is rank ",
++                                        context->input(3).shape().dims()));
++    OP_REQUIRES(
++        context, TensorShapeUtils::IsScalar(context->input(4).shape()),
++        errors::InvalidArgument("min_filter must be rank 0 but is rank ",
++                                context->input(4).shape().dims()));
++    OP_REQUIRES(
++        context, TensorShapeUtils::IsScalar(context->input(5).shape()),
++        errors::InvalidArgument("max_filter must be rank 0 but is rank ",
++                                context->input(5).shape().dims()));
+     const float min_input = context->input(2).flat<float>()(0);
+     const float max_input = context->input(3).flat<float>()(0);
+diff --git a/tensorflow/core/kernels/quantized_conv_ops_test.cc b/tensorflow/core/kernels/quantized_conv_ops_test.cc
+index 4226378bb6..dd0878a36d 100644
+--- a/tensorflow/core/kernels/quantized_conv_ops_test.cc
++++ b/tensorflow/core/kernels/quantized_conv_ops_test.cc
+@@ -91,10 +91,10 @@ TEST_F(QuantizedConv2DTest, Small) {
+                             image_quantized.flat<quint8>());
+   AddInputFromArray<quint8>(filter_quantized.shape(),
+                             filter_quantized.flat<quint8>());
+-  AddInputFromArray<float>(TensorShape({1}), {image_min});
+-  AddInputFromArray<float>(TensorShape({1}), {image_max});
+-  AddInputFromArray<float>(TensorShape({1}), {filter_min});
+-  AddInputFromArray<float>(TensorShape({1}), {filter_max});
++  AddInputFromArray<float>(TensorShape({}), {image_min});
++  AddInputFromArray<float>(TensorShape({}), {image_max});
++  AddInputFromArray<float>(TensorShape({}), {filter_min});
++  AddInputFromArray<float>(TensorShape({}), {filter_max});
+   TF_ASSERT_OK(RunOpKernel());
+   // We're sliding the 3x3 filter across the 3x4 image, with accesses outside
+@@ -158,10 +158,10 @@ TEST_F(QuantizedConv2DTest, Small32Bit) {
+   AddInputFromArray<quint8>(
+       TensorShape({filter_size, filter_size, depth, filter_count}),
+       {10, 40, 70, 20, 50, 80, 30, 60, 90});
+-  AddInputFromArray<float>(TensorShape({1}), {0});
+-  AddInputFromArray<float>(TensorShape({1}), {255.0f});
+-  AddInputFromArray<float>(TensorShape({1}), {0});
+-  AddInputFromArray<float>(TensorShape({1}), {255.0f});
++  AddInputFromArray<float>(TensorShape({}), {0});
++  AddInputFromArray<float>(TensorShape({}), {255.0f});
++  AddInputFromArray<float>(TensorShape({}), {0});
++  AddInputFromArray<float>(TensorShape({}), {255.0f});
+   TF_ASSERT_OK(RunOpKernel());
+   const int expected_width = image_width;
+@@ -201,10 +201,10 @@ TEST_F(QuantizedConv2DTest, OddPadding) {
+   AddInputFromArray<quint8>(
+       TensorShape({filter_size, filter_size, depth, filter_count}),
+       {1, 2, 3, 4, 5, 6, 7, 8, 9});
+-  AddInputFromArray<float>(TensorShape({1}), {0});
+-  AddInputFromArray<float>(TensorShape({1}), {255.0f});
+-  AddInputFromArray<float>(TensorShape({1}), {0});
+-  AddInputFromArray<float>(TensorShape({1}), {255.0f});
++  AddInputFromArray<float>(TensorShape({}), {0});
++  AddInputFromArray<float>(TensorShape({}), {255.0f});
++  AddInputFromArray<float>(TensorShape({}), {0});
++  AddInputFromArray<float>(TensorShape({}), {255.0f});
+   TF_ASSERT_OK(RunOpKernel());
+   const int expected_width = image_width / stride;
+@@ -244,10 +244,10 @@ TEST_F(QuantizedConv2DTest, OddPaddingBatch) {
+   AddInputFromArray<quint8>(
+       TensorShape({filter_size, filter_size, depth, filter_count}),
+       {1, 2, 3, 4, 5, 6, 7, 8, 9});
+-  AddInputFromArray<float>(TensorShape({1}), {0});
+-  AddInputFromArray<float>(TensorShape({1}), {255.0f});
+-  AddInputFromArray<float>(TensorShape({1}), {0});
+-  AddInputFromArray<float>(TensorShape({1}), {255.0f});
++  AddInputFromArray<float>(TensorShape({}), {0});
++  AddInputFromArray<float>(TensorShape({}), {255.0f});
++  AddInputFromArray<float>(TensorShape({}), {0});
++  AddInputFromArray<float>(TensorShape({}), {255.0f});
+   TF_ASSERT_OK(RunOpKernel());
+   const int expected_width = image_width / stride;
+@@ -302,10 +302,10 @@ TEST_F(QuantizedConv2DTest, SmallWithNoZero) {
+                             image_quantized.flat<quint8>());
+   AddInputFromArray<quint8>(filter_quantized.shape(),
+                             filter_quantized.flat<quint8>());
+-  AddInputFromArray<float>(TensorShape({1}), {image_min});
+-  AddInputFromArray<float>(TensorShape({1}), {image_max});
+-  AddInputFromArray<float>(TensorShape({1}), {filter_min});
+-  AddInputFromArray<float>(TensorShape({1}), {filter_max});
++  AddInputFromArray<float>(TensorShape({}), {image_min});
++  AddInputFromArray<float>(TensorShape({}), {image_max});
++  AddInputFromArray<float>(TensorShape({}), {filter_min});
++  AddInputFromArray<float>(TensorShape({}), {filter_max});
+   TF_ASSERT_OK(RunOpKernel());
+   const int expected_width = image_width;
+   const int expected_height = image_height * filter_count;
+diff --git a/tensorflow/python/ops/quantized_conv_ops_test.py b/tensorflow/python/ops/quantized_conv_ops_test.py
+index 6b469a954f..dbf352618f 100644
+--- a/tensorflow/python/ops/quantized_conv_ops_test.py
++++ b/tensorflow/python/ops/quantized_conv_ops_test.py
+@@ -22,6 +22,8 @@ import numpy as np
+ from tensorflow.python.framework import constant_op
+ from tensorflow.python.framework import dtypes
++from tensorflow.python.framework import errors
++from tensorflow.python.ops import math_ops
+ from tensorflow.python.ops import nn_ops
+ from tensorflow.python.platform import test
+@@ -200,6 +202,71 @@ class Conv2DTest(test.TestCase):
+         padding="SAME",
+         expected=expected_output)
++  def _testBadInputSize(self,
++                        tin=None,
++                        tfilter=None,
++                        min_input=None,
++                        max_input=None,
++                        min_filter=None,
++                        max_filter=None,
++                        error_regex=""):
++    strides = [1, 1, 1, 1]
++    padding = "SAME"
++    if tin is None:
++      tin = math_ops.cast(
++          constant_op.constant(1, shape=[1, 2, 3, 3]), dtype=dtypes.quint8)
++
++    if tfilter is None:
++      tfilter = math_ops.cast(
++          constant_op.constant(1, shape=[1, 2, 3, 3]), dtype=dtypes.quint8)
++
++    if min_input is None:
++      min_input = constant_op.constant(0, shape=[], dtype=dtypes.float32)
++
++    if max_input is None:
++      max_input = constant_op.constant(0, shape=[], dtype=dtypes.float32)
++
++    if min_filter is None:
++      min_filter = constant_op.constant(0, shape=[], dtype=dtypes.float32)
++
++    if max_filter is None:
++      max_filter = constant_op.constant(0, shape=[], dtype=dtypes.float32)
++
++    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
++                                error_regex):
++      self.evaluate(
++          nn_ops.quantized_conv2d(
++              tin,
++              tfilter,
++              out_type=dtypes.qint32,
++              strides=strides,
++              padding=padding,
++              min_input=min_input,
++              max_input=max_input,
++              min_filter=min_filter,
++              max_filter=max_filter))
++
++  def testBadInputSizes(self):
++    self._testBadInputSize(
++        tin=math_ops.cast(
++            constant_op.constant(1, shape=[1, 2]), dtype=dtypes.quint8),
++        error_regex="must be rank 4")
++    self._testBadInputSize(
++        tfilter=math_ops.cast(
++            constant_op.constant(1, shape=[1, 2]), dtype=dtypes.quint8),
++        error_regex="must be rank 4")
++    self._testBadInputSize(
++        min_input=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
++        error_regex="must be rank 0")
++    self._testBadInputSize(
++        max_input=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
++        error_regex="must be rank 0")
++    self._testBadInputSize(
++        min_filter=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
++        error_regex="must be rank 0")
++    self._testBadInputSize(
++        max_filter=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
++        error_regex="must be rank 0")
+ if __name__ == "__main__":
+   test.main()
+-- 
+2.25.1
+
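An illustrative, self-contained C++ sketch (not part of the patch) of the hazard described in the message above: reading element 0 of a 0-sized min/max tensor is an out-of-bounds access, so the patched kernel checks that these inputs are rank-0 scalars before dereferencing them. The `FloatTensor` struct here is a made-up stand-in for `tensorflow::Tensor`.

#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

// Minimal stand-in for a tensor: a shape plus a flat float buffer.
struct FloatTensor {
  std::vector<int> shape;   // empty vector == rank-0 scalar
  std::vector<float> data;
  bool IsScalar() const { return shape.empty(); }
};

// Patched idea: validate that the min/max inputs are scalars *before*
// dereferencing their first element; an empty buffer would otherwise be
// read out of bounds (the "0th element of a 0-sized input").
float ScalarValue(const FloatTensor& t, const char* name) {
  if (!t.IsScalar() || t.data.size() != 1) {
    throw std::invalid_argument(std::string(name) + " must be rank 0");
  }
  return t.data[0];
}

int main() {
  FloatTensor min_input{{}, {0.0f}};  // proper scalar
  FloatTensor bad_min{{0}, {}};       // rank-1, zero elements
  std::printf("min_input = %f\n", ScalarValue(min_input, "min_input"));
  try {
    ScalarValue(bad_min, "min_input");
  } catch (const std::invalid_argument& e) {
    std::printf("rejected: %s\n", e.what());
  }
  return 0;
}
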
diff --git a/packaging/CVE-2022-29206.patch b/packaging/CVE-2022-29206.patch
new file mode 100644 (file)
index 0000000..69bded5
--- /dev/null
@@ -0,0 +1,72 @@
+From 11ced8467eccad9c7cb94867708be8fa5c66c730 Mon Sep 17 00:00:00 2001
+From: Antonio Sanchez <cantonios@google.com>
+Date: Tue, 3 May 2022 07:51:51 -0700
+Subject: [PATCH] Fix UB in SparseTensorDenseAdd
+
+Added more input validation to avoid nullptr dereferences and out-of-bounds
+array indexing.
+
+PiperOrigin-RevId: 446192704
+---
+ .../kernels/sparse_tensor_dense_add_op.cc     | 30 ++++++++++++
+ .../sparse_ops/sparse_add_op_test.py          | 46 +++++++++++++++++--
+ .../sparse_ops/sparse_ops_test.py             |  2 +-
+ 3 files changed, 72 insertions(+), 6 deletions(-)
+
+diff --git a/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc b/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
+index 48803e4b939..6d6b05bf70f 100644
+--- a/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
++++ b/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
+@@ -18,6 +18,7 @@ limitations under the License.
+ #include "tensorflow/core/kernels/sparse_tensor_dense_add_op.h"
+ #include "tensorflow/core/framework/op_kernel.h"
++#include "tensorflow/core/framework/op_requires.h"
+ #include "tensorflow/core/framework/register_types.h"
+ #include "tensorflow/core/framework/tensor.h"
+ #include "tensorflow/core/framework/tensor_util.h"
+@@ -47,6 +48,17 @@ Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values,
+         a_values->shape().DebugString(), " and ",
+         a_shape->shape().DebugString());
+   }
++  int64_t nnz = a_indices->dim_size(0);
++  int64_t ndims = a_indices->dim_size(1);
++  if (a_values->dim_size(0) != nnz) {
++    return errors::InvalidArgument("Dimensions ", nnz, " and ",
++                                   a_values->dim_size(0),
++                                   " are not compatible");
++  }
++  if (a_shape->dim_size(0) != ndims) {
++    return errors::InvalidArgument("Dimensions ", ndims, " and ",
++                                   a_shape->dim_size(0), " are not compatible");
++  }
+   if (a_shape->NumElements() != b->dims()) {
+     return errors::InvalidArgument(
+         "Two operands have different ranks; received: ", a_shape->NumElements(),
+@@ -61,6 +73,24 @@ Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values,
+           a_shape_flat(i), " vs dense side ", b->dim_size(i));
+     }
+   }
++
++  // Check for invalid indices.
++  const auto a_indices_mat = a_indices->flat_inner_dims<Index>();
++
++  for (int64_t zidx = 0; zidx < nnz; ++zidx) {
++    for (int64_t didx = 0; didx < ndims; ++didx) {
++      const Index idx = a_indices_mat(zidx, didx);
++      if (idx < 0 || idx >= a_shape_flat(didx)) {
++        return errors::InvalidArgument(
++            "Sparse tensor has an invalid index on dimension ", didx,
++            ": "
++            "a_indices(",
++            zidx, ",", didx, ") = ", idx,
++            ", dense tensor shape: ", a_shape_flat);
++      }
++    }
++  }
++
+   return Status::OK();
+ }
+
+-- 
+2.25.1
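A simplified, standalone C++ sketch (not from the patch) of the added index validation for CVE-2022-29206: every sparse index in the nnz x ndims index matrix must lie inside the dense shape before it is used to address the dense buffer. The helper below is illustrative only and does not mirror TensorFlow's actual tensor types.

#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

// Reject any sparse index that falls outside the dense shape, mirroring
// the loop added to ValidateInputs in sparse_tensor_dense_add_op.cc.
void ValidateSparseIndices(const std::vector<std::vector<int64_t>>& indices,
                           const std::vector<int64_t>& dense_shape) {
  const size_t ndims = dense_shape.size();
  for (size_t z = 0; z < indices.size(); ++z) {
    if (indices[z].size() != ndims) {
      throw std::invalid_argument("index rank does not match dense shape");
    }
    for (size_t d = 0; d < ndims; ++d) {
      const int64_t idx = indices[z][d];
      if (idx < 0 || idx >= dense_shape[d]) {
        throw std::invalid_argument("sparse index out of bounds");
      }
    }
  }
}

int main() {
  const std::vector<int64_t> dense_shape = {3, 4};
  ValidateSparseIndices({{0, 1}, {2, 3}}, dense_shape);    // accepted
  try {
    ValidateSparseIndices({{0, 1}, {2, 7}}, dense_shape);  // 7 >= 4 -> rejected
  } catch (const std::invalid_argument& e) {
    std::printf("rejected: %s\n", e.what());
  }
  return 0;
}
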
diff --git a/packaging/tensorflow.spec b/packaging/tensorflow.spec
index 5c05d27daf59849ff8052f853172931127dd4794..a464086835c30aff7826cf3f1899ed068abbde05 100644 (file)
@@ -17,6 +17,10 @@ Patch1001:      CVE-2020-15266.patch
 Patch1002:      CVE-2020-15265.patch
 Patch1003:      CVE-2020-26267.patch
 Patch1004:      CVE-2020-5215.patch
+Patch1005:      CVE-2020-15203.patch
+Patch1006:      CVE-2022-29206.patch
+Patch1007:      CVE-2022-29201.patch
+Patch1008:      CVE-2020-15208.patch
 
 # Exclusively for tf-lite
 Source31010:    flatbuffers-2.0.0.tar.gz
@@ -63,6 +67,10 @@ cp %{SOURCE1001} .
 %patch1002 -p1
 %patch1003 -p1
 %patch1004 -p1
+%patch1005 -p1
+%patch1006 -p1
+%patch1007 -p1
+%patch1008 -p1
 
 # Make it think git is there and returns OK in GBS/OBS environment
 cat << EOF > ./git