* Avoid symbol clashes due to source merging.
* Tested on Ubuntu 20.04 with GCC 9.3.0.
SOURCES ${SOURCES} ${HEADERS}
VERSION_DEFINES_FOR hetero_plugin.cpp)
+ie_faster_build(${TARGET_NAME}
+ UNITY
+)
+
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine ade pugixml ${NGRAPH_LIBRARIES} inference_engine_transformations)
namespace ngraph {
-template <typename T>
-VariantImpl<T>::~VariantImpl() { }
-
template class INFERENCE_ENGINE_API_CLASS(VariantImpl<InferenceEngine::Parameter>);
template <>
${LIBRARY_SRC}
${LIBRARY_HEADERS})
+ie_faster_build(${TARGET_NAME}_obj
+ UNITY
+)
+
target_compile_definitions(${TARGET_NAME}_obj PRIVATE IMPLEMENT_PREPROC_PLUGIN
$<TARGET_PROPERTY:inference_engine,INTERFACE_COMPILE_DEFINITIONS>
$<TARGET_PROPERTY:fluid,INTERFACE_COMPILE_DEFINITIONS>)
add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC})
+ie_faster_build(${TARGET_NAME}
+ UNITY
+)
+
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN
IR_READER_V10)
add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC})
+ie_faster_build(${TARGET_NAME}
+ UNITY
+)
+
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN)
target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/"
add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${PUBLIC_HEADERS})
ie_faster_build(${TARGET_NAME}
+ UNITY
PCH PRIVATE "src/precomp.hpp"
)
namespace ngraph {
-template <typename T>
-VariantImpl<T>::~VariantImpl() { }
-
template class ngraph::VariantImpl<FusedNames>;
constexpr VariantTypeInfo VariantWrapper<FusedNames>::type_info;
}
-} // namespace ngraph
\ No newline at end of file
+} // namespace ngraph
namespace ngraph {
-template <typename T>
-VariantImpl<T>::~VariantImpl() { }
-
template class ngraph::VariantImpl<PrimitivesPriority>;
constexpr VariantTypeInfo VariantWrapper<PrimitivesPriority>::type_info;
function(add_common_target TARGET_NAME STATIC_IE)
add_library(${TARGET_NAME} STATIC ${SOURCES})
+ ie_faster_build(${TARGET_NAME}
+ UNITY
+ )
+
set_ie_threading_interface_for(${TARGET_NAME})
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
} // namespace details
-#define VPU_THROW_FORMAT(...) \
- vpu::details::throwFormat<vpu::details::VPUException>(__FILE__, __LINE__, __VA_ARGS__)
+#define VPU_THROW_FORMAT(...) \
+ ::vpu::details::throwFormat<::vpu::details::VPUException>(__FILE__, __LINE__, __VA_ARGS__)
-#define VPU_THROW_UNLESS(condition, ...) \
- do { \
- if (!(condition)) { \
+#define VPU_THROW_UNLESS(condition, ...) \
+ do { \
+ if (!(condition)) { \
::vpu::details::throwFormat<::vpu::details::VPUException>(__FILE__, __LINE__, __VA_ARGS__); \
- } \
+ } \
} while (false)
-#define VPU_THROW_UNSUPPORTED_UNLESS(condition, ...) \
- do { \
- if (!(condition)) { \
- vpu::details::throwFormat<vpu::details::UnsupportedLayerException>(__FILE__, __LINE__, __VA_ARGS__); \
- } \
+#define VPU_THROW_UNSUPPORTED_UNLESS(condition, ...) \
+ do { \
+ if (!(condition)) { \
+ ::vpu::details::throwFormat<::vpu::details::UnsupportedLayerException>(__FILE__, __LINE__, __VA_ARGS__); \
+ } \
} while (false)
#ifdef NDEBUG
-# define VPU_INTERNAL_CHECK(condition, ...) \
- do { \
- if (!(condition)) { \
- vpu::details::throwFormat<details::VPUException>( \
- __FILE__, __LINE__, \
- "[Internal Error]: " __VA_ARGS__); \
- } \
+# define VPU_INTERNAL_CHECK(condition, ...) \
+ do { \
+ if (!(condition)) { \
+ ::vpu::details::throwFormat<::vpu::details::VPUException>( \
+ __FILE__, __LINE__, \
+ "[Internal Error]: " __VA_ARGS__); \
+ } \
} while (false)
#else
-# define VPU_INTERNAL_CHECK(condition, ...) \
- assert((condition) || !formatString(__VA_ARGS__).empty())
+# define VPU_INTERNAL_CHECK(condition, ...) \
+ assert((condition) || !::vpu::formatString(__VA_ARGS__).empty())
#endif
#ifdef NDEBUG
-# define VPU_INTERNAL_FAIL(...) \
- vpu::details::throwFormat<details::VPUException>( \
- __FILE__, __LINE__, \
+# define VPU_INTERNAL_FAIL(...) \
+ ::vpu::details::throwFormat<::vpu::details::VPUException>( \
+ __FILE__, __LINE__, \
"[Internal Error] Unreachable code: " __VA_ARGS__)
#else
# define VPU_INTERNAL_FAIL(...) \
- assert(false && !formatString(__VA_ARGS__).empty())
+ assert(false && !::vpu::formatString(__VA_ARGS__).empty())
#endif
} // namespace vpu
return true;
}
-namespace {
+namespace dyn_shape {
template<element::Type_t ET>
bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& result) {
return rc;
}
-} // namespace
+} // namespace dyn_shape
bool DynamicShapeResolver::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const {
- return evaluateDynamicShapeResolver(inputs[0], inputs[1], outputs[0]);
+ return dyn_shape::evaluateDynamicShapeResolver(inputs[0], inputs[1], outputs[0]);
}
} // namespace op
return true;
}
-namespace {
+namespace out_shape {
template<element::Type_t ET>
bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& result) {
return true;
}
-} // namespace
+} // namespace out_shape
bool OutShapeOfReshape::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const {
- return evaluateOutShapeOfReshape(inputs[0], inputs[1], m_specialZero, outputs[0]);
+ return out_shape::evaluateOutShapeOfReshape(inputs[0], inputs[1], m_specialZero, outputs[0]);
}
)
ie_faster_build(${TARGET_NAME}
+ UNITY
PCH PRIVATE "precomp.hpp"
)
ADD_CPPLINT
LABELS
CPU
-)
\ No newline at end of file
+)
+
+ie_faster_build(${TARGET_NAME}
+ UNITY
+)
add_library(${TARGET_NAME} STATIC ${TEST_INCLUDE} ${TEST_SRC})
+ie_faster_build(${TARGET_NAME}
+ UNITY
+)
+
list(APPEND EXPORT_DEPENDENCIES
funcTestUtils
ieTestHelpers
add_definitions(-DMKLDNN_ENABLE_CONCURRENT_EXEC)
-target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
\ No newline at end of file
+target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
if(COMMAND ie_faster_build)
ie_faster_build(ngraph
+ UNITY
PCH PRIVATE "src/precomp.hpp"
)
endif()
{
}
- ~VariantImpl() override;
-
const value_type& get() const { return m_value; }
value_type& get() { return m_value; }
void set(const value_type& value) { m_value = value; }
if(COMMAND ie_faster_build)
ie_faster_build(${TARGET_NAME}
+ UNITY
PCH PRIVATE "src/precomp.hpp"
)
endif()
#pragma once
+#include <cmath>
#include <cstddef>
#include "ngraph/runtime/reference/autobroadcast_binop.hpp"
{
autobroadcast_binop(
arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T {
- return x - y * floor(x / y);
+ return x - y * std::floor(x / y);
});
}
}
return m_node->shared_from_this();
}
-Output<Node> descriptor::Output::get_output() const
+ngraph::Output<Node> descriptor::Output::get_output() const
{
return get_node()->output(m_index);
}
return make_shared<Abs>(new_args.at(0));
}
-namespace
+namespace absop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Abs::evaluate");
- return evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Acos>(new_args.at(0));
}
-namespace
+namespace acosop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Acos::evaluate");
- return evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Acosh>(new_args.at(0));
}
-namespace
+namespace acoshop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Acosh::evaluate");
- return evaluate_acosh(inputs[0], outputs[0]);
+ return acoshop::evaluate_acosh(inputs[0], outputs[0]);
}
return make_shared<op::Add>(arg0, arg1);
}
-namespace
+namespace add
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
bool op::v0::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Add::evaluate");
- return evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
+ return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
// ------------------------------- v1 ------------------------------------------
bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Add::evaluate");
- return evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
+ return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<v1::LogicalAnd>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace logand
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalAnd::evaluate");
- return evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
+ return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<Asin>(new_args.at(0));
}
-namespace
+namespace asinop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Asin::evaluate");
- return evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Asinh>(new_args.at(0));
}
-namespace
+namespace asinhop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
bool op::v3::Asinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Asinh::evaluate");
- return evaluate_asinh(inputs[0], outputs[0]);
+ return asinhop::evaluate_asinh(inputs[0], outputs[0]);
}
return make_shared<Atan>(new_args.at(0));
}
-namespace
+namespace atanop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Atan::evaluate");
- return evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Atanh>(new_args.at(0));
}
-namespace
+namespace atanhop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
bool op::v3::Atanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Atanh::evaluate");
- return evaluate_atanh(inputs[0], outputs[0]);
+ return atanhop::evaluate_atanh(inputs[0], outputs[0]);
}
return make_shared<Ceiling>(new_args.at(0));
}
-namespace
+namespace ceiling
{
// function used by TYPE_CASE
template <element::Type_t ET>
bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Ceiling::evaluate");
- return evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
constexpr NodeTypeInfo op::Clamp::type_info;
-namespace
+namespace clamp
{
template <element::Type_t ET, typename T>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, T min, T max, size_t count)
bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Clamp::evaluate");
- return evaluate_clamp(
+ return clamp::evaluate_clamp(
inputs[0], outputs[0], get_min(), get_max(), shape_size(get_input_shape(0)));
}
return make_shared<Convert>(new_args.at(0), m_destination_type);
}
-namespace
+namespace convert
{
template <element::Type_t INPUT_ET, element::Type_t OUTPUT_ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out)
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Convert::evaluate");
- return evaluate_convert(input_values[0], output_values[0]);
+ return convert::evaluate_convert(input_values[0], output_values[0]);
}
return make_shared<Cos>(new_args.at(0));
}
-namespace
+namespace cosop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Cos::evaluate");
- return evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Cosh>(new_args.at(0));
}
-namespace
+namespace coshop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Cosh::evaluate");
- return evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<op::v0::Divide>(arg0, arg1);
}
-namespace
+namespace divide
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
bool op::v0::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Divide::evaluate");
- return evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
+ return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
}
// ------------------------------ v1 -------------------------------------------
bool op::v1::Divide::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Divide::evaluate");
- return evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
+ return divide::evaluate_divide(inputs[0], inputs[1], outputs[0], get_autob(), is_pythondiv());
}
return make_shared<op::v0::Equal>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace equal
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
bool op::v0::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Equal::evaluate");
- return evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
//------------------------------- v1 -------------------------------------------
bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Equal::evaluate");
- return evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
constructor_validate_and_infer_types();
}
-namespace
+namespace erfop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Erf::evaluate");
- return evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Exp>(new_args.at(0));
}
-namespace
+namespace expop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Exp::evaluate");
- return evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return expop::evaluate_exp(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Floor>(new_args.at(0));
}
-namespace
+namespace floorop
{
// function used by TYPE_CASE
template <element::Type_t ET>
bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Floor::evaluate");
- return evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<FloorMod>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace floor_mod
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::FloorMod::evaluate");
- return evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
+ return floor_mod::evaluate_floor_mod(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<v1::Gather>(new_args.at(PARAMS), new_args.at(INDICES), new_args.at(AXIS));
}
-namespace
+namespace gather
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
bool op::v0::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Gather::evaluate");
- return evaluate_gather(inputs[0], inputs[1], outputs[0], get_axis());
+ return gather::evaluate_gather(inputs[0], inputs[1], outputs[0], get_axis());
}
bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
axis += input_rank.get_length();
}
}
- return evaluate_gather(inputs[0], inputs[1], outputs[0], axis);
+ return gather::evaluate_gather(inputs[0], inputs[1], outputs[0], axis);
}
return make_shared<op::v0::Greater>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace greaterop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Greater::evaluate");
- return evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
+ return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
}
//-------------------------------------- v1 ------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Greater::evaluate");
- return evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
+ return greaterop::evaluate_greater(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<op::v0::GreaterEq>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace greater_equalop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::GreaterEq::evaluate");
- return evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
//---------------------------------- v1 ----------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::GreaterEqual::evaluate");
- return evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<op::v4::HSwish>(new_args.at(0));
}
-namespace
+namespace hswish
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const size_t count)
bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
- return evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return hswish::evaluate_hswish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<op::v0::Less>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace lessop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
bool op::v0::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Less::evaluate");
- return evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
+ return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
}
// ----------------------------- v1 --------------------------------------------
bool op::v1::Less::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Less::evaluate");
- return evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
+ return lessop::evaluate_less(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<v1::LessEqual>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace less_equalop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LessEqual::evaluate");
- return evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
// ---------------------------------- v0 ---------------------------------------
bool op::v0::LessEq::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::LessEq::evaluate");
- return evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return less_equalop::evaluate_less_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<Log>(new_args.at(0));
}
-namespace
+namespace logop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Log::evaluate");
- return evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<MatMul>(new_args.at(0), new_args.at(1), m_transpose_a, m_transpose_b);
}
-namespace
+namespace matmul
{
Shape evaluate_matmul_output_shape(const Shape& arg0_shape,
const Shape& arg1_shape,
bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::MatMul::evaluate");
- return evaluate_matmul(inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
+ return matmul::evaluate_matmul(
+ inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
}
}
}
-namespace
+namespace maxop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
bool op::v0::Max::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Max::evaluate");
- return evaluate_max(inputs[0], outputs[0], get_reduction_axes(), false);
+ return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), false);
}
constexpr NodeTypeInfo op::v1::ReduceMax::type_info;
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceMax::evaluate");
- return evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+ return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return op::Constant::create(get_element_type(), get_shape(), {0});
}
-namespace
+namespace maxpool
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg,
true,
get_rounding_type() == op::RoundingType::CEIL);
- return evaluate_maxpool(inputs[0],
- outputs[0],
- out_shape.get_shape(),
- get_kernel(),
- get_strides(),
- get_pads_begin(),
- get_pads_end());
+ return maxpool::evaluate_maxpool(inputs[0],
+ outputs[0],
+ out_shape.get_shape(),
+ get_kernel(),
+ get_strides(),
+ get_pads_begin(),
+ get_pads_end());
}
return make_shared<op::v0::Maximum>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace maximumop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Maximum::evaluate");
- return evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
+ return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
}
// ------------------------------------ v1 -------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Maximum::evaluate");
- return evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
+ return maximumop::evaluate_maximum(inputs[0], inputs[1], outputs[0], get_autob());
}
}
}
-namespace
+namespace minop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes)
bool op::v0::Min::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Min::evaluate");
- return evaluate_min(inputs[0], outputs[0], get_reduction_axes());
+ return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes());
}
constexpr NodeTypeInfo op::v1::ReduceMin::type_info;
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceMin::evaluate");
- return evaluate_min(inputs[0], outputs[0], get_reduction_axes());
+ return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes());
}
return make_shared<op::v0::Minimum>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace minimumop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Minimum::evaluate");
- return evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
+ return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
}
// ------------------------------ v1 -------------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Minimum::evaluate");
- return evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
+ return minimumop::evaluate_minimum(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<Mish>(new_args.at(0));
}
-namespace
+namespace mish
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v4::Mish::evaluate");
- return evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return mish::evaluate_mish(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<op::v0::Multiply>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace multiplyop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Multiply::evaluate");
- return evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
+ return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}
// ------------------------------------ v1 -------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Multiply::evaluate");
- return evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
+ return multiplyop::evaluate_multiply(inputs[0], inputs[1], outputs[0], get_autob());
}
// -----------------------------------------------------------------------------
return make_shared<Negative>(new_args.at(0));
}
-namespace
+namespace negativeop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Negative::evaluate");
- return evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
shared_ptr<Node> ngraph::operator-(const Output<Node>& arg0)
return make_shared<v3::NonZero>(new_args.at(0), m_output_type);
}
-namespace
+namespace nonzero
{
template <element::Type_t INPUT_ET, element::Type_t OUT_ET>
bool evaluate_nonzero_execute(const HostTensorPtr& input, const HostTensorPtr& output)
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::NonZero::evaluate");
- return evaluate_nonzero(inputs[0], outputs[0]);
+ return nonzero::evaluate_nonzero(inputs[0], outputs[0]);
}
return make_shared<v1::LogicalNot>(new_args.at(0));
}
-namespace
+namespace notop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalNot::evaluate");
- return evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
constexpr NodeTypeInfo op::v0::Not::type_info;
bool op::Not::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Not::evaluate");
- return evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return notop::evaluate_not(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<op::v0::NotEqual>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace not_equalop
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::NotEqual::evaluate");
- return evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
// ----------------------------------- v1 --------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::NotEqual::evaluate");
- return evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
+ return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<v1::LogicalOr>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace logor
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalOr::evaluate");
- return evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
+ return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
}
constexpr NodeTypeInfo op::v0::Or::type_info;
bool op::v0::Or::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Or::evaluate");
- return evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
+ return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<op::v0::Power>(new_args.at(0), new_args.at(1), this->get_autob());
}
-namespace
+namespace power
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
bool op::v0::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Power::evaluate");
- return evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
+ return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
}
// ------------------------------ v1 -------------------------------------------
bool op::v1::Power::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Power::evaluate");
- return evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
+ return power::evaluate_power(inputs[0], inputs[1], outputs[0], get_autob());
}
return make_shared<PRelu>(new_args.at(0), new_args.at(1));
}
-template <element::Type_t ET>
-bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out)
+namespace prelu
{
- runtime::reference::prelu(arg->get_data_ptr<ET>(),
- slope->get_data_ptr<ET>(),
- out->get_data_ptr<ET>(),
- arg->get_shape(),
- slope->get_shape());
- return true;
-}
+ template <element::Type_t ET>
+ bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out)
+ {
+ runtime::reference::prelu(arg->get_data_ptr<ET>(),
+ slope->get_data_ptr<ET>(),
+ out->get_data_ptr<ET>(),
+ arg->get_shape(),
+ slope->get_shape());
+ return true;
+ }
-bool evaluate_prelu(const HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out)
-{
- bool rc = true;
- switch (arg->get_element_type())
+ bool evaluate_prelu(const HostTensorPtr& arg,
+ const HostTensorPtr& slope,
+ const HostTensorPtr& out)
{
- TYPE_CASE(i8)(arg, slope, out);
- break;
- TYPE_CASE(bf16)(arg, slope, out);
- break;
- TYPE_CASE(f16)(arg, slope, out);
- break;
- TYPE_CASE(f32)(arg, slope, out);
- break;
- default: rc = false; break;
+ bool rc = true;
+ switch (arg->get_element_type())
+ {
+ TYPE_CASE(i8)(arg, slope, out);
+ break;
+ TYPE_CASE(bf16)(arg, slope, out);
+ break;
+ TYPE_CASE(f16)(arg, slope, out);
+ break;
+ TYPE_CASE(f32)(arg, slope, out);
+ break;
+ default: rc = false; break;
+ }
+ return rc;
}
- return rc;
}
bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::PRelu::evaluate");
- return evaluate_prelu(inputs[0], inputs[1], outputs[0]);
+ return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]);
}
return true;
}
-namespace
+namespace prior_box
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
return true;
}
-namespace
+namespace prior_box_clustered
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
return ngraph::make_constant_from_string("1", get_element_type(), get_shape());
}
-namespace
+namespace product
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Product::evaluate");
- return evaluate_product(inputs[0], outputs[0], get_reduction_axes(), false);
+ return product::evaluate_product(inputs[0], outputs[0], get_reduction_axes(), false);
}
return make_shared<op::v4::ReduceL1>(new_args.at(0), new_args.at(1), get_keep_dims());
}
-namespace
+namespace reduce_l1
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v4::ReduceL1::evaluate");
- return evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+ return reduce_l1::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return make_shared<op::v4::ReduceL2>(new_args.at(0), new_args.at(1), get_keep_dims());
}
-namespace
+namespace reduce_l2
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v4::ReduceL2::evaluate");
- return evaluate_reduce_l2(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+ return reduce_l2::evaluate_reduce_l2(
+ inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return make_shared<op::v1::ReduceMean>(new_args.at(0), new_args.at(1), get_keep_dims());
}
-namespace
+namespace mean
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceMean::evaluate");
- return evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+ return mean::evaluate_mean(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return make_shared<ReduceProd>(new_args.at(0), new_args.at(1), get_keep_dims());
}
-namespace
+namespace reduce_prod
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceProd::evaluate");
- return evaluate_product(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+ return reduce_prod::evaluate_product(
+ inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return make_shared<ReduceSum>(new_args.at(0), new_args.at(1), get_keep_dims());
}
-namespace
+namespace reduce_sum
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::ReduceSum::evaluate");
- return evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
+ return reduce_sum::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}
return make_shared<Relu>(new_args.at(0));
}
-namespace
+namespace relu
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Relu::evaluate");
- return evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return relu::evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Round>(new_args.at(0));
}
-namespace
+namespace roundop
{
// function used by TYPE_CASE
template <element::Type_t ET>
bool op::Round::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Round::evaluate");
- return evaluate_round(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return roundop::evaluate_round(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
inputs.at(0), inputs.at(1), inputs.at(2), inputs.at(3));
}
-namespace
+namespace scatter_element_update
{
template <element::Type_t DT, element::Type_t IT, element::Type_t AT>
bool evaluate(const HostTensorPtr& data,
}
}
- return evaluate_scatter_element_update(
+ return scatter_element_update::evaluate_scatter_element_update(
inputs[0], inputs[1], inputs[2], inputs[3], outputs[0], normalized_axis);
}
return new_shape_of;
}
-namespace
+namespace shape_of
{
template <element::Type_t ET>
inline bool evaluate(const Shape& shape, const HostTensorPtr& output_value)
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::ShapeOf::evaluate");
- return evaluate_shape_of(output_values[0], input_values[0]);
+ return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}
bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
OV_ITT_SCOPED_TASK(itt::domains::nGraph, "op::v3::ShapeOf::constant_fold");
- return constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
+ return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
}
// op::v0::ShapeOf
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::ShapeOf::evaluate");
- return evaluate_shape_of(output_values[0], input_values[0]);
+ return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}
bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
OV_ITT_SCOPED_TASK(itt::domains::nGraph, "op::v0::ShapeOf::constant_fold");
- return constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
+ return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
}
constructor_validate_and_infer_types();
}
-namespace
+namespace sigmoid
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sigmoid::evaluate");
- return evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return sigmoid::evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Sign>(new_args.at(0));
}
-namespace
+namespace signop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sign::evaluate");
- return evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Sin>(new_args.at(0));
}
-namespace
+namespace sinop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sin::evaluate");
- return evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Sinh>(new_args.at(0));
}
-namespace
+namespace sinhop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sinh::evaluate");
- return evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<op::v4::SoftPlus>(new_args.at(0));
}
-namespace
+namespace softplus
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const size_t count)
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::SoftPlus::evaluate");
- return evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return softplus::evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<v1::Split>(new_args.at(0), new_args.at(1), m_num_splits);
}
-namespace
+namespace split
{
inline bool evaluate(const HostTensorPtr& data_tensor,
const HostTensorVector& outputs,
const auto& data = inputs[0];
const auto& axis = inputs[1];
- return evaluate_split(data, axis, outputs, m_num_splits, this);
+ return split::evaluate_split(data, axis, outputs, m_num_splits, this);
}
return make_shared<Sqrt>(new_args.at(0));
}
-namespace
+namespace sqrtop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Sqrt::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Sqrt::evaluate");
- return evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Squeeze>(new_args.at(0), new_args.at(1));
}
-namespace
+namespace squeeze
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Squeeze::evaluate");
- return evaluate_squeeze(inputs[0], inputs[1], outputs[0]);
+ return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]);
}
m_ellipsis_mask);
}
-namespace
+namespace strided_slice
{
inline bool evaluate(const HostTensorPtr& in, const SlicePlan& sp, const HostTensorPtr& out)
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::StridedSlice::evaluate");
- return evaluate_strided_slice(input_values[0],
- input_values[1],
- input_values[2],
- input_values[3],
- convert_mask_to_axis_set(get_begin_mask()),
- convert_mask_to_axis_set(get_end_mask()),
- convert_mask_to_axis_set(get_new_axis_mask()),
- convert_mask_to_axis_set(get_shrink_axis_mask()),
- convert_mask_to_axis_set(get_ellipsis_mask()),
- output_values[0]);
+ return strided_slice::evaluate_strided_slice(input_values[0],
+ input_values[1],
+ input_values[2],
+ input_values[3],
+ convert_mask_to_axis_set(get_begin_mask()),
+ convert_mask_to_axis_set(get_end_mask()),
+ convert_mask_to_axis_set(get_new_axis_mask()),
+ convert_mask_to_axis_set(get_shrink_axis_mask()),
+ convert_mask_to_axis_set(get_ellipsis_mask()),
+ output_values[0]);
}
return make_shared<op::v0::Subtract>(arg0, arg1);
}
-namespace
+namespace subtract
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Subtract::evaluate");
- return evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
+ return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
}
// ------------------------------- v1 ------------------------------------------
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Subtract::evaluate");
- return evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
+ return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob());
}
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}
-namespace
+namespace sum
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg,
bool op::v0::Sum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Sum::evaluate");
- return evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), false);
+ return sum::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), false);
}
}
}
-namespace
+namespace swish
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0,
{
if (inputs.size() == 2)
{
- return evaluate_swish(inputs[0], inputs[1], outputs[0], shape_size(get_output_shape(0)));
+ return swish::evaluate_swish(
+ inputs[0], inputs[1], outputs[0], shape_size(get_output_shape(0)));
}
else
{
- return evaluate_swish(inputs[0], nullptr, outputs[0], shape_size(get_output_shape(0)));
+ return swish::evaluate_swish(
+ inputs[0], nullptr, outputs[0], shape_size(get_output_shape(0)));
}
}
return make_shared<Tan>(new_args.at(0));
}
-namespace
+namespace tanop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Tan::evaluate");
- return evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
return make_shared<Tanh>(new_args.at(0));
}
-namespace
+namespace tanhop
{
template <element::Type_t ET>
inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count)
bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Tanh::evaluate");
- return evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+ return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}
m_sort);
}
-namespace
+namespace topk
{
template <element::Type_t INPUT_ET, element::Type_t INDEX_ET>
inline bool evaluate_execute(const HostTensorPtr& arg0,
}
else
{
- axis = read_top_k_axis_from_host_tensor(inputs[2]);
+ axis = topk::read_top_k_axis_from_host_tensor(inputs[2]);
NGRAPH_CHECK(axis <= arg_shape.size(), "TopK axis is out of bounds");
}
bool compute_max = get_compute_max();
size_t k = get_k();
if (k == 0)
{
- k = read_k_from_host_tensor(inputs[1]);
+ k = topk::read_k_from_host_tensor(inputs[1]);
if (k == 0)
{
// the kernel can't handle k = 0, but output_shape[axis] = arg_shape[axis]
// 3. Compute output_shape
auto output_shape = compute_output_shape(inputs[0]->get_shape(), k, axis);
- return evaluate_topk(inputs[0],
- outputs[0],
- outputs[1],
- output_shape,
- axis,
- k,
- compute_max,
- sort_type,
- get_index_element_type());
+ return topk::evaluate_topk(inputs[0],
+ outputs[0],
+ outputs[1],
+ output_shape,
+ axis,
+ k,
+ compute_max,
+ sort_type,
+ get_index_element_type());
}
// v1 version starts
}
else
{
- k = read_k_from_host_tensor(inputs[1]);
+ k = topk::read_k_from_host_tensor(inputs[1]);
}
// 3. Compute output_shape
k = arg_shape[axis];
}
- return evaluate_topk(inputs[0],
- outputs[1],
- outputs[0],
- output_shape,
- axis,
- k,
- compute_max,
- sort_type,
- get_index_element_type());
+ return topk::evaluate_topk(inputs[0],
+ outputs[1],
+ outputs[0],
+ output_shape,
+ axis,
+ k,
+ compute_max,
+ sort_type,
+ get_index_element_type());
}
// v3 version starts
return make_shared<v1::Transpose>(new_args[0], new_args[1]);
}
-namespace
+namespace transpose
{
template <element::Type_t ET>
std::vector<int64_t> get_vector(const HostTensorPtr& arg)
const HostTensorVector& input_values) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Transpose::evaluate");
- return evaluate_transpose(input_values[0], input_values[1], output_values[0]);
+ return transpose::evaluate_transpose(input_values[0], input_values[1], output_values[0]);
}
return make_shared<Unsqueeze>(new_args.at(0), new_args.at(1));
}
-namespace
+namespace unsqueeze
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out)
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Unsqueeze::evaluate");
- return evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]);
+ return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]);
}
return make_shared<v1::VariadicSplit>(new_args.at(0), new_args.at(1), new_args.at(2));
}
-namespace
+namespace variadic_split
{
inline bool evaluate(const HostTensorPtr& in,
const HostTensorPtr& out,
const auto& axis = inputs[1];
const auto& split_lengths = inputs[2];
- return evaluate_variadic_split(data, axis, split_lengths, outputs, this);
+ return variadic_split::evaluate_variadic_split(data, axis, split_lengths, outputs, this);
}
return true;
}
-namespace
+namespace logxor
{
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::LogicalXor::evaluate");
- return evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
+ return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
}
constexpr NodeTypeInfo op::v0::Xor::type_info;
bool op::v0::Xor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Xor::evaluate");
- return evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
+ return logxor::evaluate_logxor(inputs[0], inputs[1], outputs[0], get_autob());
}
return nullptr;
}
-template <typename T>
-VariantImpl<T>::~VariantImpl()
-{
-}
-
template class ngraph::VariantImpl<std::string>;
template class ngraph::VariantImpl<int64_t>;
if(COMMAND ie_faster_build)
ie_faster_build(onnx_importer
+ UNITY
PCH PRIVATE "src/precomp.hpp"
)
endif()
{
constexpr NodeTypeInfo NullNode::type_info;
- std::shared_ptr<Node>
+ std::shared_ptr<ngraph::Node>
NullNode::clone_with_new_inputs(const OutputVector& /* new_args */) const
{
return std::make_shared<NullNode>();
m_padding_above,
m_kernel_shape,
!count_include_pad,
- op::RoundingType::FLOOR,
+ ngraph::op::RoundingType::FLOOR,
m_auto_pad)};
}
m_padding_below,
m_padding_above,
m_kernel_shape,
- op::RoundingType::FLOOR,
+ ngraph::op::RoundingType::FLOOR,
m_auto_pad)};
}
namespace ngraph
{
- template <typename T>
- VariantImpl<T>::~VariantImpl()
- {
- }
-
template <>
class VariantWrapper<Ship> : public VariantImpl<Ship>
{
disable_deprecated_warnings()
add_library(ngraph_backend SHARED ${SRC})
+
+if(COMMAND ie_faster_build)
+ ie_faster_build(ngraph_backend
+ UNITY
+ )
+endif()
+
target_compile_definitions(ngraph_backend
PRIVATE
SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}"
)
add_library(ie_backend ${LIBRARY_TYPE} ${SRC})
+
+if(COMMAND ie_faster_build)
+ ie_faster_build(ie_backend
+ UNITY
+ )
+endif()
+
add_dependencies(ie_backend inference_engine)
target_compile_definitions(ie_backend PRIVATE IE_BACKEND_DLL_EXPORTS)
target_include_directories(ie_backend PUBLIC ${IE_MAIN_SOURCE_DIR}/include)
if (NGRAPH_INTERPRETER_ENABLE)
add_library(interpreter_backend ${LIBRARY_TYPE} int_backend.cpp int_executable.cpp)
+
+ if(COMMAND ie_faster_build)
+ ie_faster_build(interpreter_backend
+ UNITY
+ )
+ endif()
+
target_compile_definitions(interpreter_backend PRIVATE INTERPRETER_BACKEND_EXPORTS)
if(NGRAPH_LIB_VERSIONING_ENABLE)
set_target_properties(interpreter_backend PROPERTIES
using namespace std;
using namespace ngraph;
-namespace
+namespace opset0_downgrade
{
template <typename OpV0, typename OpV1>
shared_ptr<Node> op_cast_binary_elementwise_node(const shared_ptr<OpV1>& node)
};
return dispatch_map;
}
-} // namespace
+} // namespace opset0_downgrade
bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
- auto& dispatch_map = get_dispatch_map();
+ auto& dispatch_map = opset0_downgrade::get_dispatch_map();
auto it = dispatch_map.find(node->get_type_info());
if (it != dispatch_map.end())
{
using namespace std;
using namespace ngraph;
-namespace
+namespace opset1_downgrade
{
shared_ptr<Node> op_cast(shared_ptr<op::v3::Broadcast> node)
{
};
return dispatch_map;
}
-} // namespace
+} // namespace opset1_downgrade
bool pass::Opset1Downgrade::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
- auto& dispatch_map = get_dispatch_map();
+ auto& dispatch_map = opset1_downgrade::get_dispatch_map();
auto it = dispatch_map.find(node->get_type_info());
if (it != dispatch_map.end())
{
using namespace std;
using namespace ngraph;
-namespace
+namespace opset1_upgrade
{
template <typename OpV0, typename OpV1>
shared_ptr<Node> op_cast_binary_elementwise_node(const shared_ptr<OpV0>& node)
return dispatch_map;
NGRAPH_SUPPRESS_DEPRECATED_END
}
-} // namespace
+} // namespace opset1_upgrade
bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
{
bool modified = false;
- auto& dispatch_map = get_dispatch_map();
+ auto& dispatch_map = opset1_upgrade::get_dispatch_map();
auto it = dispatch_map.find(node->get_type_info());
if (it != dispatch_map.end())
{
)
add_library(ngraph_test_util STATIC ${SRC})
+
+if(COMMAND ie_faster_build)
+ ie_faster_build(ngraph_test_util
+ UNITY
+ )
+endif()
+
if(NGRAPH_LIB_VERSIONING_ENABLE)
set_target_properties(ngraph_test_util PROPERTIES
VERSION ${NGRAPH_VERSION})