std::make_shared<LayerCreator<ngraph::op::Squeeze>>("Squeeze"),
std::make_shared<LayerCreator<ngraph::op::Tan>>("Tan"),
std::make_shared<LayerCreator<ngraph::op::Tanh>>("TanH"),
- std::make_shared<LayerCreator<ngraph::op::Tile>>("Tile"),
+ std::make_shared<LayerCreator<ngraph::op::v0::Tile>>("Tile"),
std::make_shared<LayerCreator<ngraph::op::v1::TopK>>("TopK"),
std::make_shared<LayerCreator<ngraph::op::TensorIterator>>("TensorIterator"),
std::make_shared<LayerCreator<ngraph::op::Transpose>>("Transpose"),
// Tile layer
template <>
-std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::Tile>::createLayer(
+std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::v0::Tile>::createLayer(
const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
const GenericLayerParams& layerParsePrms) {
checkParameters(inputs, layerParsePrms, 2);
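    // Tile takes exactly two inputs: the data tensor and the per-axis repeats.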
- return std::make_shared<ngraph::op::Tile>(inputs[0], inputs[1]);
+ return std::make_shared<ngraph::op::v0::Tile>(inputs[0], inputs[1]);
}
// StridedSlice layer
pass/constant_folding_slice.cpp
pass/constant_folding_split.cpp
pass/constant_folding_variadic_split.cpp
- pass/constant_folding_tile.cpp
pass/constant_folding.cpp
pass/constant_folding.hpp
pass/convert_fp32_to_fp16.hpp
runtime/reference/eval_helpers.hpp
runtime/reference/reshape.cpp
runtime/reference/reshape.hpp
+ runtime/reference/tile.cpp
+ runtime/reference/tile.hpp
shape.cpp
shape.hpp
shape_util.cpp
#include "ngraph/op/tile.hpp"
#include "ngraph/op/constant.hpp"
+#include "ngraph/runtime/reference/tile.hpp"
using namespace std;
using namespace ngraph;
-constexpr NodeTypeInfo op::Tile::type_info;
+constexpr NodeTypeInfo op::v0::Tile::type_info;
-op::Tile::Tile(const Output<Node>& data, const Output<Node>& repeats)
+op::v0::Tile::Tile(const Output<Node>& data, const Output<Node>& repeats)
: Op({data, repeats})
{
constructor_validate_and_infer_types();
}
-void op::Tile::validate_and_infer_types()
+void op::v0::Tile::validate_and_infer_types()
{
auto arg_et = get_input_element_type(0);
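    // The repeats input (index 1) participates in output shape inference.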
set_input_is_relevant_to_shape(1);
}
-shared_ptr<Node> op::Tile::clone_with_new_inputs(const OutputVector& new_args) const
+shared_ptr<Node> op::v0::Tile::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<Tile>(new_args.at(0), new_args.at(1));
}
+
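+// Evaluate computes Tile directly on host tensors: it aligns the repeats
+// vector with the data rank, derives the output shape, and delegates the
+// element copy to the reference kernel. This is what lets the generic
+// constant-folding path replace the dedicated Tile folding pass removed below.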
+bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
+{
+ const auto& data = inputs[0];
+    const auto& repeats = inputs[1];
+    auto& output = outputs[0];
+    auto repeats_val = read_vector<int64_t>(repeats);
+ auto repeats_rank = repeats_val.size();
+ Shape data_shape = data->get_shape();
+ auto data_rank = data_shape.size();
+ auto output_rank = std::max(data_rank, repeats_rank);
+
+ // expand data shape and repeats to output rank
+ data_shape.insert(data_shape.begin(), output_rank - data_rank, 1);
+ repeats_val.insert(repeats_val.begin(), output_rank - repeats_rank, 1);
+
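+    // Per-axis output extent = (rank-expanded) input extent * repeat count.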
+ Shape output_shape(output_rank);
+ for (size_t i = 0; i < output_rank; i++)
+ {
+ output_shape[i] = data_shape[i] * repeats_val[i];
+ }
+ runtime::reference::tile(data->get_data_ptr<const char>(),
+ output->get_data_ptr<char>(),
+ data->get_shape(),
+ output_shape,
+ data->get_element_type().size());
+
+ return true;
+}
#pragma once
#include "ngraph/op/op.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
namespace ngraph
{
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) override;
};
}
- using v0::Tile;
}
}
construct_constant_slice();
construct_constant_select();
construct_constant_one_hot();
- construct_constant_tile();
construct_constant_default();
}
void construct_constant_split();
void construct_constant_variadic_split();
void construct_constant_one_hot();
- void construct_constant_tile();
void construct_constant_default();
ngraph::BuildNodeExecutorMap m_cfmap;
+++ /dev/null
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "constant_folding.hpp"
-#include "ngraph/op/tile.hpp"
-#include "ngraph/runtime/reference/tile.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-template <typename T>
-static shared_ptr<op::Constant> fold_constant_tile(const shared_ptr<op::Constant>& data,
- const shared_ptr<Node>& tile)
-{
- runtime::AlignedBuffer buffer(shape_size(tile->get_shape()) * sizeof(T));
- T* data_ptr = buffer.get_ptr<T>();
- // No need to call the reference kernel.
- if (shape_size(tile->get_shape()) == 0)
- {
- return make_shared<op::Constant>(
- tile->get_output_element_type(0), tile->get_output_shape(0), data_ptr);
- }
-
- if (auto tile_v0 = as_type_ptr<op::v0::Tile>(tile))
- {
- runtime::reference::tile<T>(
- data->get_data_ptr<T>(), data_ptr, data->get_shape(), tile_v0->get_shape());
- }
- else
- {
- throw ngraph_error("Unsupported op in tile constant folding.");
- }
-
- return make_shared<op::Constant>(
- tile->get_output_element_type(0), tile->get_output_shape(0), data_ptr);
-}
-
-void pass::ConstantFolding::construct_constant_tile()
-{
- auto data_label = make_shared<pattern::op::Label>(
- element::f32, Shape{2, 2, 3}, pattern::has_class<op::Constant>());
- auto repeats_label =
- make_shared<pattern::op::Label>(element::i64, Shape{3}, pattern::has_class<op::Constant>());
- auto tile_v0 = make_shared<op::v0::Tile>(data_label, repeats_label);
-
- auto constant_tile_callback = [data_label](pattern::Matcher& m) {
- NGRAPH_DEBUG << "In callback for constant_tile_callback against node = "
- << m.get_match_root()->get_name();
-
- auto pattern_map = m.get_pattern_map();
-
- auto data = static_pointer_cast<op::Constant>(pattern_map[data_label]);
- auto tile = m.get_match_root();
-
- NGRAPH_CHECK(revalidate_and_ensure_static(tile));
-
- std::shared_ptr<Node> replacement;
- auto data_type = data->get_output_element_type(0);
- switch (data_type)
- {
- case element::Type_t::undefined:
- NGRAPH_CHECK(false, "Encountered 'undefined' element type in constant_tile_callback");
- break;
- case element::Type_t::dynamic:
- NGRAPH_CHECK(false, "Encountered 'dynamic' element type in constant_tile_callback");
- break;
- case element::Type_t::u1:
- NGRAPH_CHECK(false, "Encountered 'u1' element type in constant_tile_callback");
- break;
- case element::Type_t::boolean: replacement = fold_constant_tile<char>(data, tile); break;
- case element::Type_t::bf16: replacement = fold_constant_tile<bfloat16>(data, tile); break;
- case element::Type_t::f16: replacement = fold_constant_tile<float16>(data, tile); break;
- case element::Type_t::f32: replacement = fold_constant_tile<float>(data, tile); break;
- case element::Type_t::f64: replacement = fold_constant_tile<double>(data, tile); break;
- case element::Type_t::i8: replacement = fold_constant_tile<int8_t>(data, tile); break;
- case element::Type_t::i16: replacement = fold_constant_tile<int16_t>(data, tile); break;
- case element::Type_t::i32: replacement = fold_constant_tile<int32_t>(data, tile); break;
- case element::Type_t::i64: replacement = fold_constant_tile<int64_t>(data, tile); break;
- case element::Type_t::u8: replacement = fold_constant_tile<uint8_t>(data, tile); break;
- case element::Type_t::u16: replacement = fold_constant_tile<uint16_t>(data, tile); break;
- case element::Type_t::u32: replacement = fold_constant_tile<uint32_t>(data, tile); break;
- case element::Type_t::u64: replacement = fold_constant_tile<uint64_t>(data, tile); break;
- }
-
- replace_node(m.get_match_root(), replacement);
- return true;
- };
-
- auto tile_matcher_v0 = make_shared<pattern::Matcher>(tile_v0, "ConstantFolding.ConstantTileV0");
- this->add_matcher(tile_matcher_v0, constant_tile_callback, PassProperty::CHANGE_DYNAMIC_STATE);
-}
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include <cstring>
+
+#include "ngraph/check.hpp"
+#include "ngraph/runtime/reference/tile.hpp"
+
+using namespace ngraph;
+
+void runtime::reference::tile(
+ const char* arg, char* out, const Shape& in_shape, const Shape& out_shape, size_t elem_size)
+{
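+    // Left-pad the input shape with ones so it matches the output rank.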
+ Shape in_shape_expanded(in_shape);
+ in_shape_expanded.insert(in_shape_expanded.begin(), out_shape.size() - in_shape.size(), 1);
+ CoordinateTransform input_transform(in_shape_expanded);
+ CoordinateTransform output_transform(out_shape);
+
+ for (const Coordinate& output_coord : output_transform)
+ {
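+        // An output coordinate maps to its source input coordinate by taking
+        // each axis index modulo the expanded input extent on that axis.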
+ std::vector<size_t> coord;
+        for (size_t i = 0; i < output_coord.size(); i++)
+ {
+ auto val = output_coord[i] % in_shape_expanded[i];
+ coord.push_back(val);
+ }
+ Coordinate input_coord(coord);
+
+        std::memcpy(out + output_transform.index(output_coord) * elem_size,
+                    arg + input_transform.index(input_coord) * elem_size,
+                    elem_size);
+ }
+}
#include <cmath>
#include "ngraph/coordinate_transform.hpp"
+#include "ngraph/type/element_type.hpp"
namespace ngraph
{
{
namespace reference
{
- template <typename T>
- void tile(const T* arg, T* out, const Shape& in_shape, const Shape& out_shape)
- {
- Shape in_shape_expanded(in_shape);
- in_shape_expanded.insert(
- in_shape_expanded.begin(), out_shape.size() - in_shape.size(), 1);
- CoordinateTransform input_transform(in_shape_expanded);
- CoordinateTransform output_transform(out_shape);
-
- for (const Coordinate& output_coord : output_transform)
- {
- std::vector<size_t> coord;
- for (auto i = 0; i < output_coord.size(); i++)
- {
- auto val = output_coord[i] % in_shape_expanded[i];
- coord.push_back(val);
- }
- Coordinate input_coord(coord);
- out[output_transform.index(output_coord)] =
- arg[input_transform.index(input_coord)];
- }
- }
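+            // Type-erased kernel: copies raw bytes (elem_size per element),
+            // so a single non-template symbol serves every element type.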
+ void tile(const char* arg,
+ char* out,
+ const Shape& in_shape,
+ const Shape& out_shape,
+ size_t elem_size);
}
}
}
case OP_TYPEID::Tile:
{
- node = make_shared<op::Tile>(args[0], args[1]);
+ node = make_shared<op::v0::Tile>(args[0], args[1]);
break;
}
case OP_TYPEID::TopK:
auto repeats = make_shared<op::Constant>(element::i64, shape_re, vector<int>{2, 2, 1});
Shape shape_r{2, 2, 3};
- auto tile = make_shared<op::Tile>(A, repeats);
+ auto tile = make_shared<op::v0::Tile>(A, repeats);
auto f = make_shared<Function>(tile, ParameterVector{A});
auto repeats = make_shared<op::Constant>(element::i64, shape_re, vector<int>{2, 1});
Shape shape_r{2, 2, 3};
- auto tile = make_shared<op::Tile>(A, repeats);
+ auto tile = make_shared<op::v0::Tile>(A, repeats);
auto f = make_shared<Function>(tile, ParameterVector{A});
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
- ASSERT_EQ(count_ops_of_type<op::Tile>(f), 0);
+ ASSERT_EQ(count_ops_of_type<op::v0::Tile>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const =
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
- ASSERT_EQ(count_ops_of_type<op::Tile>(f), 0);
+ ASSERT_EQ(count_ops_of_type<op::v0::Tile>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const =
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
- ASSERT_EQ(count_ops_of_type<op::Tile>(f), 0);
+ ASSERT_EQ(count_ops_of_type<op::v0::Tile>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const =
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
- ASSERT_EQ(count_ops_of_type<op::Tile>(f), 0);
+ ASSERT_EQ(count_ops_of_type<op::v0::Tile>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const =
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
- ASSERT_EQ(count_ops_of_type<op::Tile>(f), 0);
+ ASSERT_EQ(count_ops_of_type<op::v0::Tile>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const =
void op_is_Tile()
{
- op::Tile node;
+ op::v0::Tile node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
NGRAPH_OP(Tan, ngraph::op)
NGRAPH_OP(Tanh, ngraph::op)
NGRAPH_OP(TensorIterator, ngraph::op)
-NGRAPH_OP(Tile, ngraph::op)
+NGRAPH_OP(Tile, ngraph::op::v0)
NGRAPH_OP(TopK, ngraph::op::v0)
NGRAPH_OP(Unsqueeze, ngraph::op)
NGRAPH_OP(Xor, ngraph::op)
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{6, 8, 10});
auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1});
- auto top = make_shared<op::Tile>(param0, param1);
+ auto top = make_shared<op::v0::Tile>(param0, param1);
ASSERT_EQ(top->get_element_type(), element::f32);
ASSERT_EQ(top->get_shape(), (Shape{18, 32, 10}));
}
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{8, 10});
auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1});
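    // Data rank (2) is below repeats rank (3): data is left-padded to {1, 8, 10},
    // so tiling by {3, 4, 1} yields {3, 32, 10}.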
- auto top = make_shared<op::Tile>(param0, param1);
+ auto top = make_shared<op::v0::Tile>(param0, param1);
ASSERT_EQ(top->get_element_type(), element::f32);
ASSERT_EQ(top->get_shape(), (Shape{3, 32, 10}));
}
{
auto param0 = make_shared<op::Parameter>(element::f32, Shape{6, 8, 10});
auto param1 = op::Constant::create(element::i64, Shape{2}, {4, 1});
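    // Repeats rank (2) is below data rank (3): repeats is left-padded to {1, 4, 1},
    // so {6, 8, 10} yields {6, 32, 10}.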
- auto top = make_shared<op::Tile>(param0, param1);
+ auto top = make_shared<op::v0::Tile>(param0, param1);
ASSERT_EQ(top->get_element_type(), element::f32);
ASSERT_EQ(top->get_shape(), (Shape{6, 32, 10}));
}