"softmax",
"space_to_batch_nd",
"space_to_depth",
+ "sparse_to_dense",
"split",
"squeeze",
"strided_slice",
int stride_height;
} TfLiteTransposeConvParams;
+typedef struct {
+ bool validate_indices;
+} TfLiteSparseToDenseParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
kTfLiteBuiltinSlice = 65,
kTfLiteBuiltinSin = 66,
kTfLiteBuiltinTransposeConv = 67,
+ kTfLiteBuiltinSparseToDense = 68,
} TfLiteBuiltinOperator;
#ifdef __cplusplus
}
```
+**SPARSE_TO_DENSE**
+
+```
+Inputs {
+  0: 0D, 1D, or 2D tensor of sparse indices
+  1: 1D tensor holding the dense output shape
+  2: 0D or 1D tensor of sparse values
+  3: 0D tensor holding the default value
+  4: a boolean value (validate_indices)
+}
+Outputs {
+  0: dense tensor of shape output_shape, with the same type as sparse_values.
+}
+```
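+
+For example, with sparse_indices `[[0, 0], [1, 2]]`, output_shape `[2, 3]`,
+sparse_values `[5, 6]`, and default_value `0`, the op produces the dense
+tensor `[[5, 0, 0], [0, 0, 6]]`.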
+
**SPLIT**
```
"slice.cc",
"space_to_batch_nd.cc",
"space_to_depth.cc",
+ "sparse_to_dense.cc",
"split.cc",
"squeeze.cc",
"strided_slice.cc",
],
)
+tf_cc_test(
+ name = "sparse_to_dense_test",
+ size = "small",
+ srcs = ["sparse_to_dense_test.cc"],
+ tags = ["tflite_not_portable_ios"],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
filegroup(
name = "all_files",
srcs = glob(
}
}
+// For ease of implementation, the indices are always given as a vector of
+// size-4 vectors.
+template <typename T, typename I>
+inline void SparseToDense(const std::vector<std::vector<I>>& indices,
+ const T* values, T default_value, T* output_data,
+ const Dims<4>& output_dims, bool value_is_scalar) {
+ const int value_count = indices.size();
+
+  // First, fill output_data with the default value.
+ const int num_elements = FlatSize(output_dims);
+ for (int i = 0; i < num_elements; ++i) {
+ output_data[i] = default_value;
+ }
+
+  // Special-case a scalar value to avoid re-checking the boolean condition
+  // inside the loop on every iteration.
+ if (value_is_scalar) {
+ for (int i = 0; i < value_count; ++i) {
+ const std::vector<I>& index = indices[i];
+ TFLITE_DCHECK_EQ(index.size(), 4);
+      const T value = *values;  // Use the single scalar value.
+ output_data[Offset(output_dims, index[3], index[2], index[1], index[0])] =
+ value;
+ }
+ return;
+ }
+
+  // Walk the indices and scatter the corresponding values.
+ for (int i = 0; i < value_count; ++i) {
+ const std::vector<I>& index = indices[i];
+ TFLITE_DCHECK_EQ(index.size(), 4);
+ const T value = values[i];
+ output_data[Offset(output_dims, index[3], index[2], index[1], index[0])] =
+ value;
+ }
+}
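+
+// Illustrative usage (a sketch, not part of the API): scattering two values
+// into a [2, 3] output. GetTensorDims reverses the shape, so
+// output_dims.sizes is {3, 2, 1, 1}, and element (row, col) = (1, 2) gets
+// the zero-padded 4-D index {0, 0, 1, 2}:
+//
+//   std::vector<std::vector<int32_t>> indices = {{0, 0, 0, 0}, {0, 0, 1, 2}};
+//   float values[] = {5.0f, 6.0f};
+//   float output[6];
+//   SparseToDense(indices, values, /*default_value=*/0.0f, output,
+//                 output_dims, /*value_is_scalar=*/false);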
+
} // namespace reference_ops
} // namespace tflite
TfLiteRegistration* Register_SLICE();
TfLiteRegistration* Register_SIN();
TfLiteRegistration* Register_TRANSPOSE_CONV();
+TfLiteRegistration* Register_SPARSE_TO_DENSE();
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
AddBuiltin(BuiltinOperator_SLICE, Register_SLICE());
AddBuiltin(BuiltinOperator_SIN, Register_SIN());
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
+ AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
--- /dev/null
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <unistd.h>
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <iostream>
+#include <limits>
+
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+#include "tensorflow/contrib/lite/kernels/op_macros.h"
+#include "tensorflow/contrib/lite/kernels/padding.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace sparse_to_dense {
+
+constexpr int kIndicesTensor = 0;
+constexpr int kOutputShapeTensor = 1;
+constexpr int kValueInputTensor = 2;
+constexpr int kDefaultValueTensor = 3;
+constexpr int kOutputTensor = 0;
+
+constexpr int kMaxDimensions = 4;
+
+template <typename T>
+TfLiteStatus Resize(TfLiteContext* context, const TfLiteTensor* output_shape,
+ TfLiteTensor* output) {
+ const int output_dimensions = NumElements(output_shape);
+ TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(output_dimensions);
+ for (int i = 0; i < output_dimensions; ++i) {
+ output_shape_array->data[i] = GetTensorData<T>(output_shape)[i];
+ }
+
+ return context->ResizeTensor(context, output, output_shape_array);
+}
+
+TfLiteStatus CheckDimensionsMatch(TfLiteContext* context,
+ const TfLiteTensor* indices,
+ const TfLiteTensor* output_shape,
+ const TfLiteTensor* values) {
+ switch (NumDimensions(indices)) {
+ case 0:
+ case 1: {
+      // If values is a vector, it must supply one value per index.
+      if (NumDimensions(values) != 0) {
+        TF_LITE_ENSURE_EQ(context, NumElements(indices), NumElements(values));
+      }
+ TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 1);
+ break;
+ }
+ case 2: {
+ TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 1),
+ NumElements(output_shape));
+      // If values is a vector, it must supply one value per index.
+      if (NumDimensions(values) != 0) {
+        TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
+                          NumElements(values));
+      }
+ break;
+ }
+ default:
+ context->ReportError(
+ context, "Wrong indices dimensions %d, should be less than 3.",
+ NumDimensions(indices));
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+// Converts indices into a vector of 4-D vectors.
+// TODO(renjieliu): Revisit to improve performance, since the repeated
+// std::vector allocations will be quite slow on phones.
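+// For example, a 2-D indices tensor [[1, 2], [0, 3]] is converted to
+// {{0, 0, 1, 2}, {0, 0, 0, 3}}.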
+template <typename T>
+TfLiteStatus GetIndicesVector(TfLiteContext* context,
+ const TfLiteTensor* indices,
+ const int num_indices,
+ std::vector<std::vector<T>>* indices_vector) {
+  // TfLite reverses the dimensions, so pad each index with leading zeros.
+ switch (NumDimensions(indices)) {
+ case 0:
+ case 1: {
+ const auto indices_data = GetTensorData<T>(indices);
+ for (int i = 0; i < num_indices; ++i) {
+ std::vector<T> index({0, 0, 0, indices_data[i]});
+ indices_vector->push_back(index);
+ }
+ break;
+ }
+ case 2: {
+ const int true_dimensions = SizeOfDimension(indices, 1);
+ TF_LITE_ENSURE(context, true_dimensions <= kMaxDimensions);
+ for (int i = 0; i < num_indices; ++i) {
+ std::vector<T> index;
+ index.reserve(kMaxDimensions);
+        // Pad the index with zeros up to kMaxDimensions - true_dimensions so
+        // that it is always a 4-dimensional index.
+ for (int j = 0; j < kMaxDimensions - true_dimensions; ++j) {
+ index.push_back(0);
+ }
+ for (int j = 0; j < true_dimensions; ++j) {
+ index.push_back(GetTensorData<T>(indices)[i * true_dimensions + j]);
+ }
+
+ indices_vector->push_back(index);
+ }
+ break;
+ }
+ default:
+      context->ReportError(
+          context, "Indices must be 0-D, 1-D, or 2-D, got %d dimensions.",
+          NumDimensions(indices));
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus ResizeOutputShape(TfLiteContext* context,
+ const TfLiteTensor* output_shape,
+ TfLiteTensor* output) {
+ if (output_shape->type == kTfLiteInt32) {
+ return Resize<int32_t>(context, output_shape, output);
+ } else if (output_shape->type == kTfLiteInt64) {
+ return Resize<int64_t>(context, output_shape, output);
+ } else {
+ context->ReportError(context, "Dense shape type %d not supported.",
+ output_shape->type);
+ return kTfLiteError;
+ }
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
+ const TfLiteTensor* output_shape =
+ GetInput(context, node, kOutputShapeTensor);
+ const TfLiteTensor* values = GetInput(context, node, kValueInputTensor);
+ const TfLiteTensor* default_value =
+ GetInput(context, node, kDefaultValueTensor);
+
+ // TODO(renjieliu): Handle validate_indices.
+
+  // Indices can be 0-D, 1-D, or 2-D.
+ TF_LITE_ASSERT(NumDimensions(indices) >= 0);
+ TF_LITE_ENSURE(context, NumDimensions(indices) < 3);
+ TF_LITE_ASSERT(NumDimensions(output_shape) >= 0);
+ TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
+ // Values can be 0-D or 1-D.
+ TF_LITE_ASSERT(NumDimensions(values) >= 0);
+ TF_LITE_ENSURE(context, NumDimensions(values) < 2);
+
+ TF_LITE_ENSURE_EQ(context, NumElements(default_value), 1);
+
+ TF_LITE_ENSURE(
+ context, indices->type == kTfLiteInt32 || indices->type == kTfLiteInt64);
+ TF_LITE_ENSURE(context, output_shape->type == kTfLiteInt32 ||
+ output_shape->type == kTfLiteInt64);
+ TF_LITE_ENSURE_EQ(context, values->type, default_value->type);
+
+ // Ensure dimensions match.
+ TF_LITE_ENSURE_OK(
+ context, CheckDimensionsMatch(context, indices, output_shape, values));
+
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
+
+ if (!IsConstantTensor(output_shape)) {
+ SetTensorToDynamic(output);
+ return kTfLiteOk;
+ }
+ return ResizeOutputShape(context, output_shape, output);
+}
+
+template <typename T, typename I>
+TfLiteStatus SparseToDenseImpl(TfLiteContext* context, TfLiteNode* node) {
+ const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
+ const TfLiteTensor* output_shape =
+ GetInput(context, node, kOutputShapeTensor);
+ const TfLiteTensor* values = GetInput(context, node, kValueInputTensor);
+ const TfLiteTensor* default_value =
+ GetInput(context, node, kDefaultValueTensor);
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+
+ if (IsDynamicTensor(output)) {
+ TF_LITE_ENSURE_OK(context,
+ ResizeOutputShape(context, output_shape, output));
+ }
+
+ const int num_indices = SizeOfDimension(indices, 0);
+ const bool value_is_scalar = NumDimensions(values) == 0;
+ std::vector<std::vector<I>> indices_vector;
+ indices_vector.reserve(num_indices);
+ TF_LITE_ENSURE_OK(context, GetIndicesVector<I>(context, indices, num_indices,
+ &indices_vector));
+ reference_ops::SparseToDense(indices_vector, GetTensorData<T>(values),
+ *GetTensorData<T>(default_value),
+ GetTensorData<T>(output), GetTensorDims(output),
+ value_is_scalar);
+ return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
+ const TfLiteTensor* values = GetInput(context, node, kValueInputTensor);
+
+  // Currently only float32 and int32 values are supported.
+ switch (values->type) {
+ case kTfLiteFloat32: {
+ switch (indices->type) {
+ case kTfLiteInt32: {
+ return SparseToDenseImpl<float, int32_t>(context, node);
+ }
+ case kTfLiteInt64: {
+ return SparseToDenseImpl<float, int64_t>(context, node);
+ }
+ default:
+ context->ReportError(
+ context, "Type %d is currently not supported by sparse to dense.",
+ indices->type);
+ return kTfLiteError;
+ }
+ break;
+ }
+ case kTfLiteInt32: {
+ switch (indices->type) {
+ case kTfLiteInt32: {
+ return SparseToDenseImpl<int32_t, int32_t>(context, node);
+ }
+ case kTfLiteInt64: {
+ return SparseToDenseImpl<int32_t, int64_t>(context, node);
+ }
+ default:
+ context->ReportError(
+ context, "Type %d is currently not supported by sparse to dense.",
+ indices->type);
+ return kTfLiteError;
+ }
+ break;
+ }
+ default:
+ context->ReportError(
+ context, "Type %d is currently not supported by sparse to dense.",
+ values->type);
+ return kTfLiteError;
+ }
+}
+
+} // namespace sparse_to_dense
+
+TfLiteRegistration* Register_SPARSE_TO_DENSE() {
+ static TfLiteRegistration r = {nullptr, nullptr, sparse_to_dense::Prepare,
+ sparse_to_dense::Eval};
+ return &r;
+}
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
--- /dev/null
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <cstdarg>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+template <typename T>
+class SparseToDenseOpModel : public SingleOpModel {
+ public:
+ SparseToDenseOpModel(std::initializer_list<int> indices_shape,
+ std::initializer_list<int> output_shape_shape,
+ std::initializer_list<int> values_shape, T default_value,
+ TensorType tensor_index_type,
+ TensorType tensor_input_type) {
+ indices_ = AddInput(tensor_index_type);
+ output_shape_ = AddInput(TensorType_INT32);
+ values_ = AddInput(tensor_input_type);
+ default_value_ = AddInput(tensor_input_type);
+ output_ = AddOutput(tensor_input_type);
+
+ SetBuiltinOp(BuiltinOperator_SPARSE_TO_DENSE,
+ BuiltinOptions_SparseToDenseOptions,
+ CreateSparseToDenseOptions(builder_, false).Union());
+ BuildInterpreter({indices_shape, output_shape_shape, values_shape, {1}});
+
+ PopulateTensor<T>(default_value_, {default_value});
+ }
+
+ int indices() { return indices_; }
+ int output_shape() { return output_shape_; }
+ int values() { return values_; }
+
+ std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int indices_;
+ int output_shape_;
+ int values_;
+ int default_value_;
+ int output_;
+};
+
+TEST(SparseToDenseOpModelTest, ZeroDimensionTest) {
+ SparseToDenseOpModel<float> m({1}, {1}, {1}, 0, TensorType_INT32,
+ TensorType_FLOAT32);
+ m.PopulateTensor<int32_t>(m.indices(), {3});
+ m.PopulateTensor<int32_t>(m.output_shape(), {5});
+ m.PopulateTensor<float>(m.values(), {7});
+ m.Invoke();
+
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 7, 0}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({5}));
+}
+
+TEST(SparseToDenseOpModelTest, OneDimensionTest) {
+ SparseToDenseOpModel<float> m({3}, {1}, {3}, 0, TensorType_INT32,
+ TensorType_FLOAT32);
+ m.PopulateTensor<int32_t>(m.indices(), {1, 3, 5});
+ m.PopulateTensor<int32_t>(m.output_shape(), {7});
+ m.PopulateTensor<float>(m.values(), {2, 4, 6});
+ m.Invoke();
+
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 2, 0, 4, 0, 6, 0}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({7}));
+}
+
+TEST(SparseToDenseOpModelTest, TwoDimensionsTest) {
+ SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, 0, TensorType_INT32,
+ TensorType_FLOAT32);
+ m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
+ m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
+ m.PopulateTensor<float>(m.values(), {2, 4, 6});
+ m.Invoke();
+
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray({2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 4, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
+}
+
+TEST(SparseToDenseOpModelTest, DefaultValueTest) {
+ SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
+ TensorType_FLOAT32);
+ m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
+ m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
+ m.PopulateTensor<float>(m.values(), {2, 4, 6});
+ m.Invoke();
+
+ EXPECT_THAT(
+ m.GetOutput(),
+ ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
+}
+
+TEST(SparseToDenseOpModelTest, IntegerValueTest) {
+ SparseToDenseOpModel<int32_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
+ TensorType_INT32);
+ m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
+ m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
+ m.PopulateTensor<int32_t>(m.values(), {2, 4, 6});
+ m.Invoke();
+
+ EXPECT_THAT(
+ m.GetOutput(),
+ ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
+}
+
+TEST(SparseToDenseOpModelTest, Int64IndexTest) {
+ SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, -1, TensorType_INT64,
+ TensorType_FLOAT32);
+ m.PopulateTensor<int64_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
+ m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
+ m.PopulateTensor<float>(m.values(), {2, 4, 6});
+ m.Invoke();
+
+ EXPECT_THAT(
+ m.GetOutput(),
+ ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
+}
+
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
*builtin_data = reinterpret_cast<void*>(params);
break;
}
+ case BuiltinOperator_SPARSE_TO_DENSE: {
+ TfLiteSparseToDenseParams* params =
+ MallocPOD<TfLiteSparseToDenseParams>();
+ if (auto* sparse_to_dense_params =
+ op->builtin_options_as_SparseToDenseOptions()) {
+ params->validate_indices = sparse_to_dense_params->validate_indices();
+ }
+ *builtin_data = reinterpret_cast<void*>(params);
+ break;
+ }
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
error_reporter->Report("DELEGATE op shouldn't exist in model.");
case tflite::BuiltinOperator_SLICE:
case tflite::BuiltinOperator_SIN:
case tflite::BuiltinOperator_TRANSPOSE_CONV:
+ case tflite::BuiltinOperator_SPARSE_TO_DENSE:
FATAL("Op code %d is currently not delegated to NNAPI", builtin);
nn_op_type = -1; // set to invalid
break;
SLICE = 65,
SIN = 66,
TRANSPOSE_CONV = 67,
+ SPARSE_TO_DENSE = 68,
}
// Options for the builtin operators.
SelectOptions,
SliceOptions,
TransposeConvOptions,
+ SparseToDenseOptions,
}
enum Padding : byte { SAME, VALID }
stride_h:int;
}
+table SparseToDenseOptions {
+ validate_indices:bool;
+}
+
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
struct TransposeConvOptions;
struct TransposeConvOptionsT;
+struct SparseToDenseOptions;
+struct SparseToDenseOptionsT;
+
struct OperatorCode;
struct OperatorCodeT;
BuiltinOperator_SLICE = 65,
BuiltinOperator_SIN = 66,
BuiltinOperator_TRANSPOSE_CONV = 67,
+ BuiltinOperator_SPARSE_TO_DENSE = 68,
BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_TRANSPOSE_CONV
+ BuiltinOperator_MAX = BuiltinOperator_SPARSE_TO_DENSE
};
-inline BuiltinOperator (&EnumValuesBuiltinOperator())[67] {
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[68] {
static BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
BuiltinOperator_SELECT,
BuiltinOperator_SLICE,
BuiltinOperator_SIN,
- BuiltinOperator_TRANSPOSE_CONV
+ BuiltinOperator_TRANSPOSE_CONV,
+ BuiltinOperator_SPARSE_TO_DENSE
};
return values;
}
"SLICE",
"SIN",
"TRANSPOSE_CONV",
+ "SPARSE_TO_DENSE",
nullptr
};
return names;
BuiltinOptions_SelectOptions = 47,
BuiltinOptions_SliceOptions = 48,
BuiltinOptions_TransposeConvOptions = 49,
+ BuiltinOptions_SparseToDenseOptions = 50,
BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_TransposeConvOptions
+ BuiltinOptions_MAX = BuiltinOptions_SparseToDenseOptions
};
-inline BuiltinOptions (&EnumValuesBuiltinOptions())[50] {
+inline BuiltinOptions (&EnumValuesBuiltinOptions())[51] {
static BuiltinOptions values[] = {
BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions,
BuiltinOptions_LessEqualOptions,
BuiltinOptions_SelectOptions,
BuiltinOptions_SliceOptions,
- BuiltinOptions_TransposeConvOptions
+ BuiltinOptions_TransposeConvOptions,
+ BuiltinOptions_SparseToDenseOptions
};
return values;
}
"SelectOptions",
"SliceOptions",
"TransposeConvOptions",
+ "SparseToDenseOptions",
nullptr
};
return names;
static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions;
};
+template<> struct BuiltinOptionsTraits<SparseToDenseOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions;
+};
+
struct BuiltinOptionsUnion {
BuiltinOptions type;
void *value;
return type == BuiltinOptions_TransposeConvOptions ?
reinterpret_cast<const TransposeConvOptionsT *>(value) : nullptr;
}
+ SparseToDenseOptionsT *AsSparseToDenseOptions() {
+ return type == BuiltinOptions_SparseToDenseOptions ?
+ reinterpret_cast<SparseToDenseOptionsT *>(value) : nullptr;
+ }
+ const SparseToDenseOptionsT *AsSparseToDenseOptions() const {
+ return type == BuiltinOptions_SparseToDenseOptions ?
+ reinterpret_cast<const SparseToDenseOptionsT *>(value) : nullptr;
+ }
};
bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct SparseToDenseOptionsT : public flatbuffers::NativeTable {
+ typedef SparseToDenseOptions TableType;
+ bool validate_indices;
+ SparseToDenseOptionsT()
+ : validate_indices(false) {
+ }
+};
+
+struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SparseToDenseOptionsT NativeTableType;
+ enum {
+ VT_VALIDATE_INDICES = 4
+ };
+ bool validate_indices() const {
+ return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) &&
+ verifier.EndTable();
+ }
+ SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SparseToDenseOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SparseToDenseOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_validate_indices(bool validate_indices) {
+ fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast<uint8_t>(validate_indices), 0);
+ }
+ explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &);
+ flatbuffers::Offset<SparseToDenseOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ bool validate_indices = false) {
+ SparseToDenseOptionsBuilder builder_(_fbb);
+ builder_.add_validate_indices(validate_indices);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct OperatorCodeT : public flatbuffers::NativeTable {
typedef OperatorCode TableType;
BuiltinOperator builtin_code;
const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const {
return builtin_options_type() == BuiltinOptions_TransposeConvOptions ? static_cast<const TransposeConvOptions *>(builtin_options()) : nullptr;
}
+ const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const {
+ return builtin_options_type() == BuiltinOptions_SparseToDenseOptions ? static_cast<const SparseToDenseOptions *>(builtin_options()) : nullptr;
+ }
const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
}
return builtin_options_as_TransposeConvOptions();
}
+template<> inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const {
+ return builtin_options_as_SparseToDenseOptions();
+}
+
struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
_stride_h);
}
+inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new SparseToDenseOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = validate_indices(); _o->validate_indices = _e; };
+}
+
+inline flatbuffers::Offset<SparseToDenseOptions> SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSparseToDenseOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _validate_indices = _o->validate_indices;
+ return tflite::CreateSparseToDenseOptions(
+ _fbb,
+ _validate_indices);
+}
+
inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new OperatorCodeT();
UnPackTo(_o, _resolver);
auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
return verifier.VerifyTable(ptr);
}
+ case BuiltinOptions_SparseToDenseOptions: {
+ auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return false;
}
}
auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
return ptr->UnPack(resolver);
}
+ case BuiltinOptions_SparseToDenseOptions: {
+ auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
auto ptr = reinterpret_cast<const TransposeConvOptionsT *>(value);
return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union();
}
+ case BuiltinOptions_SparseToDenseOptions: {
+ auto ptr = reinterpret_cast<const SparseToDenseOptionsT *>(value);
+ return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
value = new TransposeConvOptionsT(*reinterpret_cast<TransposeConvOptionsT *>(u.value));
break;
}
+ case BuiltinOptions_SparseToDenseOptions: {
+ value = new SparseToDenseOptionsT(*reinterpret_cast<SparseToDenseOptionsT *>(u.value));
+ break;
+ }
default:
break;
}
delete ptr;
break;
}
+ case BuiltinOptions_SparseToDenseOptions: {
+ auto ptr = reinterpret_cast<SparseToDenseOptionsT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
" --inference_type=%s" % inference_type +
" --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
" --input_arrays=%s" % ",".join(input_arrays) +
- " --input_shapes=%s" % shape_str +
" --output_arrays=%s" % ",".join(output_arrays))
+ if shape_str:
+ s += (" --input_shapes=%s" % shape_str)
if extra_toco_options.drop_control_dependency:
s += " --drop_control_dependency"
if extra_toco_options.allow_custom_ops:
return value.astype(dtype)
+def create_scalar_data(dtype, min_value=-100, max_value=100):
+ """Build scalar tensor data range from min_value to max_value exclusively."""
+
+ if dtype in _TF_TYPE_INFO:
+ dtype = _TF_TYPE_INFO[dtype][0]
+
+ if dtype in (tf.float32, tf.float16):
+ value = (max_value - min_value) * np.random.random() + min_value
+ elif dtype in (tf.int32, tf.uint8, tf.int64):
+ value = np.random.randint(min_value, max_value + 1)
+ return np.array(value, dtype=dtype)
+
+
def freeze_graph(session, outputs):
"""Freeze the current graph.
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+def make_sparse_to_dense_tests(zip_path):
+ """Make a set of tests to do sparse to dense."""
+
+ test_parameters = [{
+ "value_dtype": [tf.float32, tf.int32],
+ "index_dtype": [tf.int32, tf.int64],
+ "value_count": [1, 3, 6, 8],
+ "dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
+ "default_value": [0, -1],
+ "value_is_scalar": [True, False],
+ }]
+
+ # Return a single value for 1-D dense shape, but a tuple for other shapes.
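+  # For example, generate_index([15]) might return 7, while
+  # generate_index([3, 10]) might return (2, 4).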
+ def generate_index(dense_shape):
+ if len(dense_shape) == 1:
+ return np.random.randint(dense_shape[0])
+ else:
+ index = []
+ for shape in dense_shape:
+ index.append(np.random.randint(shape))
+ return tuple(index)
+
+ def build_graph(parameters):
+ """Build the sparse_to_dense op testing graph."""
+ dense_shape = parameters["dense_shape"]
+
+    # Special case: when value_is_scalar is set and value_count is 1, use a
+    # scalar placeholder instead of a 1-D one.
+ if parameters["value_is_scalar"] and parameters["value_count"] == 1:
+ value = tf.placeholder(
+ name="value", dtype=parameters["value_dtype"], shape=())
+ else:
+ value = tf.placeholder(
+ name="value",
+ dtype=parameters["value_dtype"],
+ shape=[parameters["value_count"]])
+ indices = set()
+ while len(indices) < parameters["value_count"]:
+ indices.add(generate_index(dense_shape))
+ indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
+ # TODO(renjieliu): Add test for validate_indices case.
+ out = tf.sparse_to_dense(
+ indices,
+ dense_shape,
+ value,
+ parameters["default_value"],
+ validate_indices=False)
+
+ return [value], [out]
+
+ def build_inputs(parameters, sess, inputs, outputs):
+ if parameters["value_is_scalar"] and parameters["value_count"] == 1:
+ input_value = create_scalar_data(parameters["value_dtype"])
+ else:
+ input_value = create_tensor_data(parameters["value_dtype"],
+ [parameters["value_count"]])
+ return [input_value], sess.run(
+ outputs, feed_dict=dict(zip(inputs, [input_value])))
+
+ make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+
# Toco binary path provided by the generate rule.
bin_path = None
(*comparison_op->mutable_attr())["T"].set_type(data_type);
}
+void ConvertSparseToDenseOperator(const Model& model,
+ const SparseToDenseOperator& src_op,
+ const char* op_name,
+ GraphDef* tensorflow_graph) {
+ auto* sparse_to_dense_op = tensorflow_graph->add_node();
+ sparse_to_dense_op->set_op(op_name);
+ sparse_to_dense_op->set_name(src_op.outputs[0]);
+ CHECK_EQ(src_op.inputs.size(), 4);
+ for (int i = 0; i < 4; ++i) {
+ *sparse_to_dense_op->add_input() = src_op.inputs[i];
+ }
+ const auto data_type = GetTensorFlowDataType(model, src_op.inputs[3]);
+ (*sparse_to_dense_op->mutable_attr())["T"].set_type(data_type);
+ const auto index_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ (*sparse_to_dense_op->mutable_attr())["Tindices"].set_type(index_type);
+ (*sparse_to_dense_op->mutable_attr())["Tindices"].set_b(
+ src_op.validate_indices);
+}
+
void ConvertOperator(const Model& model, const Operator& src_op,
GraphDef* tensorflow_graph) {
if (src_op.fused_activation_function != FusedActivationFunctionType::kNone) {
SetDataTypeForAllOutputs(model, op, data_type_x);
break;
}
+ case OperatorType::kSparseToDense: {
+      // SparseToDense produces outputs with the same type as its 3rd input
+      // (sparse_values), which must match the 4th input (default_value).
+ CHECK_EQ(op->inputs.size(), 4);
+ const ArrayDataType data_type = model->GetArray(op->inputs[2]).data_type;
+ const ArrayDataType data_type_default =
+ model->GetArray(op->inputs[3]).data_type;
+ CHECK(data_type == data_type_default);
+ SetDataTypeForAllOutputs(model, op, data_type);
+ break;
+ }
default: {
// These operators produce outputs with the same type as their 1st input
CHECK_GT(op->inputs.size(), 0);
*output_array.mutable_shape()->mutable_dims() = output_dims;
}
+void ProcessSparseToDenseOperator(Model* model, SparseToDenseOperator* op) {
+ CHECK_EQ(op->inputs.size(), 4);
+
+ const Array& output_shape_array = model->GetArray(op->inputs[1]);
+ if (!output_shape_array.has_shape()) return;
+ CHECK_EQ(output_shape_array.shape().dimensions_count(), 1);
+
+  // The output must not exceed four dimensions.
+ CHECK_LE(output_shape_array.shape().dims(0), 4);
+
+ const string& output_name = op->outputs[0];
+ Array& output_array = model->GetArray(output_name);
+ if (output_array.has_shape()) return;
+
+ CHECK(output_shape_array.data_type == ArrayDataType::kInt32 ||
+ output_shape_array.data_type == ArrayDataType::kInt64);
+ if (output_shape_array.data_type == ArrayDataType::kInt32) {
+ *output_array.mutable_shape()->mutable_dims() =
+ output_shape_array.GetBuffer<ArrayDataType::kInt32>().data;
+ } else {
+ const std::vector<int64>& output_shape_data =
+ output_shape_array.GetBuffer<ArrayDataType::kInt64>().data;
+ std::copy(
+ output_shape_data.begin(), output_shape_data.end(),
+ std::back_inserter(*output_array.mutable_shape()->mutable_dims()));
+ }
+}
+
} // namespace
bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
CHECK_EQ(op->inputs.size(), 1);
ProcessOpWithShapeInput(model, op);
break;
+ case OperatorType::kSparseToDense:
+ ProcessSparseToDenseOperator(model,
+ static_cast<SparseToDenseOperator*>(op));
+ break;
default:
// Unimplemented, another graph transformation should drop it.
LOG(FATAL) << "Unhandled operator type " << OperatorTypeName(op->type);
model->operators.emplace_back(op.release());
}
+void ConvertSparseToDenseOperator(const NodeDef& node,
+ const TensorFlowImportFlags& tf_import_flags,
+ Model* model) {
+ CHECK_EQ(node.op(), "SparseToDense");
+ CheckInputsCount(node, tf_import_flags, 4);
+
+ auto* op = new SparseToDenseOperator;
+ for (const string& input : node.input()) {
+ op->inputs.push_back(input);
+ }
+ op->outputs.push_back(node.name());
+
+ op->validate_indices = HasAttr(node, "validate_indices")
+ ? GetBoolAttr(node, "validate_indices")
+ : true;
+ model->operators.emplace_back(op);
+}
+
} // namespace
namespace internal {
ConvertSinOperator(node, tf_import_flags, model);
} else if (node.op() == "Select") {
ConvertSelectOperator(node, tf_import_flags, model);
+ } else if (node.op() == "SparseToDense") {
+ ConvertSparseToDenseOperator(node, tf_import_flags, model);
} else {
ConvertUnsupportedOperator(node, tf_import_flags, model);
}
// special nodes in the graph to shuffle axes.
kReorderAxes,
kSelect,
+ kSparseToDense,
};
// Helper to deal with TensorFlow arrays using a different ordering of
int num_partitions;
};
+// SparseToDense operator:
+//
+// Inputs:
+// Inputs[0]: required: sparse_indices.
+// Inputs[1]: required: output_shape.
+// Inputs[2]: required: sparse_values.
+// Inputs[3]: required: default_value.
+//
+// TensorFlow equivalent: SparseToDense.
+struct SparseToDenseOperator : Operator {
+ SparseToDenseOperator() : Operator(OperatorType::kSparseToDense) {}
+ bool validate_indices;
+};
+
// Alloc's are used for transient arrays only. An Alloc specifies which interval
// of the "transient_data" workspace buffer passed to inference functions, is to
// be used for the transient array at hand. The 'start' and 'end' values are
int GetVersion(const Operator& op) const override { return 1; }
};
+class SparseToDense
+ : public BuiltinOperator<SparseToDenseOperator,
+ ::tflite::SparseToDenseOptions,
+ ::tflite::BuiltinOptions_SparseToDenseOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateSparseToDenseOptions(*builder, op.validate_indices);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->validate_indices = options.validate_indices();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
class TensorFlowUnsupported : public BaseOperator {
public:
using BaseOperator::BaseOperator;
new ArgMax(::tflite::BuiltinOperator_ARG_MAX, OperatorType::kArgMax));
ops.emplace_back(new TransposeConv(::tflite::BuiltinOperator_TRANSPOSE_CONV,
OperatorType::kTransposeConv));
+ ops.emplace_back(new SparseToDense(::tflite::BuiltinOperator_SPARSE_TO_DENSE,
+ OperatorType::kSparseToDense));
// Custom Operators.
ops.emplace_back(
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
}
+TEST_F(OperatorTest, BuiltinSparseToDense) {
+ SparseToDenseOperator op;
+ op.validate_indices = false;
+ std::unique_ptr<toco::SparseToDenseOperator> output_toco_op =
+ SerializeAndDeserialize(
+ GetOperator("SPARSE_TO_DENSE", OperatorType::kSparseToDense), op);
+ EXPECT_EQ(op.validate_indices, output_toco_op->validate_indices);
+}
+
TEST_F(OperatorTest, TensorFlowUnsupported) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
HANDLE_OPERATORTYPENAME_CASE(DynamicPartition)
HANDLE_OPERATORTYPENAME_CASE(DynamicStitch)
HANDLE_OPERATORTYPENAME_CASE(Select)
+ HANDLE_OPERATORTYPENAME_CASE(SparseToDense)
default:
LOG(FATAL) << "Unhandled op type";
#undef HANDLE_OPERATORTYPENAME_CASE