* Move `DataType.h` into the `ir` directory.
* Move `DataType` into the `neurun::ir` namespace.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
switch (type.dtype)
{
- case model::DataType::FLOAT32:
+ case ir::DataType::FLOAT32:
api_type.dtype = NNFW_TYPE_TENSOR_FLOAT32;
break;
- case model::DataType::INT32:
+ case ir::DataType::INT32:
api_type.dtype = NNFW_TYPE_TENSOR_INT32;
break;
- case model::DataType::QUANT8_ASYMM:
+ case ir::DataType::QUANT8_ASYMM:
api_type.dtype = NNFW_TYPE_TENSOR_QUANT8_ASYMM;
break;
- case model::DataType::BOOL8:
+ case ir::DataType::BOOL8:
api_type.dtype = NNFW_TYPE_TENSOR_BOOL;
break;
default:
return NNFW_STATUS_NO_ERROR;
}
-static NNFW_TYPE datatype_to_nnfw_dtype(neurun::model::DataType dt)
+static NNFW_TYPE datatype_to_nnfw_dtype(neurun::ir::DataType dt)
{
- using neurun::model::DataType;
+ using neurun::ir::DataType;
switch (dt)
{
case DataType::FLOAT32:
#include "kernel/ConcatLayer.h"
#include "model/Index.h"
-#include "model/DataType.h"
+#include "ir/DataType.h"
#include "model/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
const int endData_size = _ctx.at(ends_index).shape().num_elements();
const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
- using neurun::model::DataType;
+ using ir::DataType;
UNUSED_RELEASE(startData_size);
UNUSED_RELEASE(endData_size);
assert(_ctx.at(paddings_index).isConstant());
std::unique_ptr<::arm_compute::IFunction> fn;
- if (_ctx.at(ofm_index).typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ if (_ctx.at(ofm_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM)
{
// NOTE CLSpaceToBatchLayer has a bug that padding's values are 0 even when zero point of
// QASYMM8 is not 0.
#include "kernel/ConcatLayer.h"
#include "model/Index.h"
-#include "model/DataType.h"
#include "model/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "Convert.h"
#include "Swizzle.h"
-#include "model/DataType.h"
+#include "ir/DataType.h"
#include <cpp14/memory.h>
namespace
return res;
}
-::arm_compute::DataType asDataType(const ::neurun::model::DataType &type)
+::arm_compute::DataType asDataType(const ir::DataType type)
{
switch (type)
{
- case ::neurun::model::DataType::FLOAT32:
+ case ir::DataType::FLOAT32:
return ::arm_compute::DataType::F32;
- case ::neurun::model::DataType::INT32:
+ case ir::DataType::INT32:
return ::arm_compute::DataType::S32;
- case ::neurun::model::DataType::UINT32:
+ case ir::DataType::UINT32:
return ::arm_compute::DataType::U32;
- case ::neurun::model::DataType::QUANT8_ASYMM:
+ case ir::DataType::QUANT8_ASYMM:
return ::arm_compute::DataType::QASYMM8;
- case ::neurun::model::DataType::BOOL8:
+ case ir::DataType::BOOL8:
return ::arm_compute::DataType::U8;
default:
throw std::runtime_error("Not supported, yet");
::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord,
ir::Layout frontend_layout,
ir::Layout backend_layout);
-::arm_compute::DataType asDataType(const ::neurun::model::DataType &type);
+::arm_compute::DataType asDataType(ir::DataType type);
::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape,
const ::neurun::model::TypeInfo &typeInfo,
ir::Layout frontend_layout, ir::Layout backend_layout,
#include "kernel/ConcatLayer.h"
#include "util/Padding.h"
#include "model/Index.h"
-#include "model/DataType.h"
+#include "ir/DataType.h"
#include "model/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
const int endData_size = _ctx.at(ends_index).shape().num_elements();
const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
- using neurun::model::DataType;
+ using ir::DataType;
UNUSED_RELEASE(startData_size);
UNUSED_RELEASE(endData_size);
#include "kernel/ConcatLayer.h"
#include "util/Padding.h"
#include "model/Index.h"
-#include "model/DataType.h"
#include "model/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)};
// Quantization : not supported
- if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM)
{
throw std::runtime_error{"ShapeFixer: NYI for quantized Add"};
}
const auto rhs_index{node.getInputs().at(model::operation::Sub::Input::RHS)};
// Quantization : not supported
- if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM)
{
throw std::runtime_error{"ShapeFixer: NYI for quantized Sub"};
}
const auto rhs_index{node.getInputs().at(model::operation::Sub::Input::RHS)};
// Quantization : not supported
- if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM)
{
throw std::runtime_error{"ShapeFixer: NYI for quantized Mul"};
}
const auto lhs_index{node.getInputs().at(model::operation::Sub::Input::LHS)};
// Quantization : not supported
- if (_ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ if (_ctx.at(lhs_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM)
{
throw std::runtime_error{"ShapeFixer: NYI for quantized Pad"};
}
#include <cker/Shape.h>
#include "model/Operand.h"
-#include "model/DataType.h"
+#include "ir/DataType.h"
#include <model/InternalType.h>
-using OperandType = neurun::model::DataType;
+using OperandType = neurun::ir::DataType;
namespace neurun
{
void PermuteLayer::configure(std::shared_ptr<backend::operand::ITensor> input,
std::shared_ptr<backend::operand::ITensor> output,
- const model::Shape &output_shape, Type type, model::DataType dataType)
+ const model::Shape &output_shape, Type type, ir::DataType dataType)
{
_input = input;
_output = output;
void PermuteLayer::run()
{
- using ::neurun::model::DataType;
+ using ir::DataType;
switch (_dataType)
{
case DataType::FLOAT32:
void configure(std::shared_ptr<backend::operand::ITensor> input,
std::shared_ptr<backend::operand::ITensor> output,
const model::Shape &output_shape, model::operation::Permute::Type type,
- model::DataType dataType);
+ ir::DataType dataType);
void run();
void runSync()
{
std::shared_ptr<backend::operand::ITensor> _output{nullptr};
model::Shape _output_shape{};
model::operation::Permute::Type _type{model::operation::Permute::Type::COPY};
- model::DataType _dataType{model::DataType::FLOAT32};
+ ir::DataType _dataType{ir::DataType::FLOAT32};
};
} // namespace kernel
public:
void setBuffer(uint8_t *buffer) { _buffer = buffer; }
- ::neurun::model::DataType data_type() const { return _info.typeInfo().type(); }
+ ir::DataType data_type() const { return _info.typeInfo().type(); }
public:
uint8_t *buffer() const override { return _buffer; }
VERBOSE(FillOperandData) << "[SRCN] Fill data for operand " << index.value() << std::endl;
const auto type = obj.typeInfo().type();
- using neurun::model::DataType;
+ using ir::DataType;
using namespace std::placeholders;
switch (type)
#include <cassert>
#include <cpp14/memory.h>
-#include <model/DataType.h>
+#include <ir/DataType.h>
#include "Swizzle.h"
#include <vector>
#include <vector>
#include "model/Operand.h"
-#include "model/DataType.h"
+#include "ir/DataType.h"
#include <model/InternalType.h>
#include <ncnn/srcn/conv_type.h>
-using OperandType = neurun::model::DataType;
+using OperandType = neurun::ir::DataType;
using neurun::util::Coordinates;
namespace neurun
public:
void setBuffer(uint8_t *buffer) { _buffer = buffer; }
- ::neurun::model::DataType data_type() const { return _info.typeInfo().type(); }
+ ir::DataType data_type() const { return _info.typeInfo().type(); }
public:
uint8_t *buffer() const override { return _buffer; }
#include "exec/IFunction.h"
#include "misc/tensor/Shape.h"
-#include "model/DataType.h"
+#include "ir/DataType.h"
#include <vector>
#include <memory>
struct TypeInfo
{
Shape shape;
- model::DataType dtype;
+ ir::DataType dtype;
};
struct CustomKernelConfigParams
return;
const auto type = obj.typeInfo().type();
- using neurun::model::DataType;
+ using ir::DataType;
switch (type)
{
return;
const auto type = obj.typeInfo().type();
- using neurun::model::DataType;
+ using ir::DataType;
using namespace std::placeholders;
switch (type)
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_DATATYPE_H__
-#define __NEURUN_MODEL_DATATYPE_H__
+#ifndef __NEURUN_IR_DATATYPE_H__
+#define __NEURUN_IR_DATATYPE_H__
#include <stdexcept>
namespace neurun
{
-namespace model
+namespace ir
{
enum class DataType
}
}
+} // namespace ir
+
+// TODO Remove after merging 'model' and 'graph' namespaces.
+namespace model
+{
+using DataType = ir::DataType;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_DATATYPE_H__
+#endif // __NEURUN_IR_DATATYPE_H__
#include <algorithm>
#include "Data.h"
-#include "DataType.h"
+#include "ir/DataType.h"
#include "OperandInfo.h"
#include "ir/operand/ParentInfo.h" // TODO Remove this dependency
#include "model/OperationIndexList.h"
void removeDef(const OperationIndex &idx);
public:
- void type(const DataType &type) { _info.type(type); };
+ void type(const DataType type) { _info.type(type); };
public:
void data(std::unique_ptr<Data> &&data) { _data = std::move(data); }
/**
* @brief Set tensor data type
*/
- void type(const DataType &type) { _typeInfo.type(type); }
+ void type(const DataType type) { _typeInfo.type(type); }
/**
* @brief Return size of tensor (bytes)
* @return Tensor size
#include <cstdint>
-#include "DataType.h"
+#include "ir/DataType.h"
namespace neurun
{
int32_t offset() const { return _offset; }
public:
- void type(const DataType &type) { _type = type; }
+ void type(const DataType type) { _type = type; }
private:
DataType _type;
Permute(const OperandIndex &input, const OperandIndex &output,
const backend::BackendContext *input_backend_ctx,
const backend::BackendContext *output_backend_ctx, Type type,
- model::DataType data_type = model::DataType::FLOAT32);
+ DataType data_type = DataType::FLOAT32);
public:
const Param ¶m() const { return _param; }
- model::DataType getDataType() const { return _dataType; }
+ DataType getDataType() const { return _dataType; }
Type getPermuteType() const { return _type; }
private:
Param _param;
Type _type;
- model::DataType _dataType;
+ DataType _dataType;
};
} // namespace operation
for (const auto &input : node.getInputs())
{
const auto &obj = graph.operands().at(input);
- if (obj.typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ if (obj.typeInfo().type() == ir::DataType::QUANT8_ASYMM)
{
return true;
}
for (const auto &output : node.getOutputs())
{
const auto &operand = _graph->operands().at(output);
- const bool quant = operand.typeInfo().type() == model::DataType::QUANT8_ASYMM;
+ const bool quant = operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM;
// average data transfer cost of this operand's data
int64_t avg_transfer_cost = 1;
for (const auto *backend : _all_backends)
for (const auto &input_operand_idx : node.getInputs())
{
const auto &input_operand = _graph->operands().at(input_operand_idx);
- const bool quant = input_operand.typeInfo().type() == model::DataType::QUANT8_ASYMM;
+ const bool quant = input_operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM;
for (const auto &input_node_idx : input_operand.getDef().list())
{
UNUSED_RELEASE(rhs_index);
assert(_ctx.at(lhs_index).typeInfo().type() == _ctx.at(rhs_index).typeInfo().type());
- assert(_ctx.at(output_index).typeInfo().type() == model::DataType::BOOL8);
+ assert(_ctx.at(output_index).typeInfo().type() == ir::DataType::BOOL8);
}
void OperationValidator::visit(const model::operation::Softmax &node)
// Verify operand here, not at SimpleEmbeddingLookup::configure() to avoid acl's modifying
// TensorShape sometimes(Issue: https://github.sec.samsung.net/STAR/nnfw/issues/729)
{
- assert(lookups_obj.typeInfo().type() == neurun::model::DataType::INT32);
+ assert(lookups_obj.typeInfo().type() == ir::DataType::INT32);
const auto &output_shape = output_obj.shape();
const auto &lookups_shape = lookups_obj.shape();
const auto &keys_obj = _ctx.at(keys_index);
const auto &values_obj = _ctx.at(values_index);
- assert(lookups_obj.typeInfo().type() == neurun::model::DataType::INT32);
- assert(keys_obj.typeInfo().type() == neurun::model::DataType::INT32);
- assert(hits_obj.typeInfo().type() == neurun::model::DataType::QUANT8_ASYMM);
+ assert(lookups_obj.typeInfo().type() == ir::DataType::INT32);
+ assert(keys_obj.typeInfo().type() == ir::DataType::INT32);
+ assert(hits_obj.typeInfo().type() == ir::DataType::QUANT8_ASYMM);
const auto &output_shape = output_obj.shape();
const auto &hits_shape = hits_obj.shape();
assert(_ctx.at(input_index).shape().rank() <= 4);
assert(_ctx.at(input_index).shape() == _ctx.at(output_index).shape());
- assert(_ctx.at(input_index).typeInfo().type() == neurun::model::DataType::QUANT8_ASYMM);
- assert(_ctx.at(output_index).typeInfo().type() == neurun::model::DataType::FLOAT32);
+ assert(_ctx.at(input_index).typeInfo().type() == ir::DataType::QUANT8_ASYMM);
+ assert(_ctx.at(output_index).typeInfo().type() == ir::DataType::FLOAT32);
}
void OperationValidator::visit(const model::operation::Mean &node)
assert(pad_shape.rank() == 2);
assert(pad_shape.dim(0) == input_rank);
assert(pad_shape.dim(1) == 2);
- assert(_ctx.at(pad_index).typeInfo().type() == model::DataType::INT32);
+ assert(_ctx.at(pad_index).typeInfo().type() == ir::DataType::INT32);
assert(_ctx.at(input_index).shape().rank() == _ctx.at(output_index).shape().rank());
}
// fill ExecTime:
bool is_quantized = exec->graph().operands().at(node->getInputs().at(0)).typeInfo().type() ==
- model::DataType::QUANT8_ASYMM;
+ ir::DataType::QUANT8_ASYMM;
uint32_t size = 0;
for (const auto &input : node->getInputs())
const model::TypeInfo &type, const void *buffer,
size_t length, ir::Layout io_layout)
{
- using ::neurun::model::DataType;
+ using ir::DataType;
switch (type.type())
{
case DataType::FLOAT32:
std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type,
void *buffer, size_t length, ir::Layout io_layout)
{
- using ::neurun::model::DataType;
+ using ir::DataType;
switch (type.type())
{
case DataType::FLOAT32:
* @brief Return data type of tensor
* @return Data type of tensor
*/
- virtual model::DataType data_type() const = 0;
+ virtual ir::DataType data_type() const = 0;
/**
* @brief Return TensorInfo
* @return TensorInfo
size_t calcOffset(const util::Coordinates &coords) const override;
ir::Layout layout() const override;
bool has_padding() const override { return false; }
- model::DataType data_type() const override { return _info.typeInfo().type(); }
+ ir::DataType data_type() const override { return _info.typeInfo().type(); }
const model::OperandInfo &tensorInfo() const override { return _info; }
uint64_t num_elements() const override { return _info.shape().num_elements(); };
size_t calcOffset(const util::Coordinates &coords) const override;
ir::Layout layout() const override;
bool has_padding() const override { return false; }
- model::DataType data_type() const override { return _info.typeInfo().type(); }
+ ir::DataType data_type() const override { return _info.typeInfo().type(); }
const model::OperandInfo &tensorInfo() const override { return _info; }
uint64_t num_elements() const override { return _info.shape().num_elements(); };
const auto out_tensor = env->tensorAt(out_index);
const auto data_type = lhs_tensor->data_type();
- if (data_type == model::DataType::INT32)
+ if (data_type == ir::DataType::INT32)
{
invoke<int32_t>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
}
- else if (data_type == model::DataType::FLOAT32)
+ else if (data_type == ir::DataType::FLOAT32)
{
invoke<float>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
}
const auto out_tensor = env->tensorAt(out_index);
const auto data_type = in_tensor->data_type();
- if (data_type == model::DataType::FLOAT32)
+ if (data_type == ir::DataType::FLOAT32)
{
invoke(in_tensor, out_tensor, avgpool_node.param());
}
const uint32_t axis = (axis_raw < 0) ? (axis_raw + out_tensor->num_dimensions()) : axis_raw;
const auto data_type = in_tensors[0]->data_type();
- if (data_type == model::DataType::FLOAT32)
+ if (data_type == ir::DataType::FLOAT32)
{
invoke(in_tensors, out_tensor, axis);
}
const auto ofm_tensor = env->tensorAt(ofm_index);
const auto data_type = ifm_tensor->data_type();
- if (data_type == model::DataType::FLOAT32)
+ if (data_type == ir::DataType::FLOAT32)
{
invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
}
const auto ofm_tensor = env->tensorAt(ofm_index);
const auto data_type = ifm_tensor->data_type();
- if (data_type == model::DataType::FLOAT32)
+ if (data_type == ir::DataType::FLOAT32)
{
invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
}
const auto ofm_tensor = env->tensorAt(ofm_index);
const auto data_type = ifm_tensor->data_type();
- if (data_type == model::DataType::FLOAT32)
+ if (data_type == ir::DataType::FLOAT32)
{
invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
}
const auto out_tensor = env->tensorAt(out_index);
const auto data_type = in_tensor->data_type();
- if (data_type == model::DataType::FLOAT32)
+ if (data_type == ir::DataType::FLOAT32)
{
invoke(in_tensor, out_tensor, maxpool_node.param());
}
const auto in_data_type = in_tensor->data_type();
const auto out_data_type = out_tensor->data_type();
- if ((in_data_type == model::DataType::FLOAT32) && (out_data_type == model::DataType::FLOAT32))
+ if ((in_data_type == ir::DataType::FLOAT32) && (out_data_type == ir::DataType::FLOAT32))
{
invoke(in_tensor, out_tensor, softmax_node.param());
}
Permute::Permute(const OperandIndex &input, const OperandIndex &output,
const backend::BackendContext *input_backend_ctx,
- const backend::BackendContext *output_backend_ctx, Type type,
- model::DataType data_type)
+ const backend::BackendContext *output_backend_ctx, Type type, DataType data_type)
: model::Operation{OperandConstraint::createExact(1u)},
_param{input_backend_ctx, output_backend_ctx}, _type{type}, _dataType{data_type}
{
// Helper functions
model::Activation convertActivation(ActivationFunctionType type);
- model::DataType tensorTypeToDataType(TensorType type);
+ ir::DataType tensorTypeToDataType(TensorType type);
// Create operands form tflite::Tensor
model::OperandIndex loadOperand(const Tensor *tensor);
}
template <typename LoaderDomain, typename SpecificLoader>
-model::DataType
+ir::DataType
BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::tensorTypeToDataType(const TensorType type)
{
switch (type)
{
case TensorType::TensorType_FLOAT32:
- return model::DataType::FLOAT32;
+ return ir::DataType::FLOAT32;
case TensorType::TensorType_INT32:
- return model::DataType::INT32;
+ return ir::DataType::INT32;
case TensorType::TensorType_BOOL:
- return model::DataType::BOOL8;
+ return ir::DataType::BOOL8;
case TensorType::TensorType_UINT8:
- return model::DataType::QUANT8_ASYMM;
+ return ir::DataType::QUANT8_ASYMM;
default:
throw std::runtime_error(
std::string("Unsupported tensor type: ").append(EnumNameTensorType(type)));
shape.append(dim);
}
// Type
- model::DataType data_type = tensorTypeToDataType(tensor->type());
+ ir::DataType data_type = tensorTypeToDataType(tensor->type());
// Quantization
auto q_params = tensor->quantization();
float scale = 0.0;
const OptionsType *options)
{
model::Shape shape;
- model::TypeInfo type_info(model::DataType::INT32);
// Strides
param.stride.vertical = options->stride_w();
param.stride.horizontal = options->stride_h();
loadStridesAndPaddings(param, options);
// Filter width and height
model::Shape shape;
- model::TypeInfo type_info(model::DataType::INT32);
// Strides
param.kw = options->filter_width();
param.kh = options->filter_height();
loadStridesAndPaddings(param, options);
// Multiplier
model::Shape shape;
- model::TypeInfo type_info(model::DataType::INT32);
param.multiplier = options->depth_multiplier();
// Dilation h/w factor unused
std::unique_ptr<model::Operation> new_op(
const auto *options = op->builtin_options_as_ConcatenationOptions();
// Axis
model::Shape shape;
- model::TypeInfo type_info(model::DataType::INT32);
param.axis = options->axis();
// activation unused
* @brief Convert data type from NNAPI to internal data type
* @param[in] type NNAPI's data type
* @return neurun's internal data type
- * @note Now neurun::model::DataType shares the same enum value\n
- with OperandCode in NeuralNetworks.h.\n
- If we don't share same value, we must fix this mapping function.
+ * @note Now neurun::ir::DataType shares the same enum value\n
+ with OperandCode in NeuralNetworks.h.\n
+ If we don't share the same value, we must fix this mapping function.
*/
- static ::neurun::model::DataType getDataType(OperandCode type);
+ static neurun::ir::DataType getDataType(OperandCode type);
/**
* @brief Convert operand type info from NNAPI to interanl operand type info
{
using namespace neurun::model;
-void replaceDataType(Operands &operands, const OperandIndex &index, const DataType &type)
+void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type)
{
assert(operands.exist(index));
operands.at(index).type(type);
#include <model/Shape.h>
#include <model/InternalType.h>
#include <model/TypeInfo.h>
-#include <model/DataType.h>
+#include <ir/DataType.h>
#include <model/operation/Add.h>
#include <model/operation/Sub.h>
{
using namespace neurun::model;
-using DataType = neurun::model::DataType;
+using DataType = neurun::ir::DataType;
class CompiledMockUpModel
{
{
using namespace neurun::model;
-using DataType = neurun::model::DataType;
+using DataType = neurun::ir::DataType;
using ExecManager = neurun::exec::interp::ExecManager;
using Execution = neurun::exec::Execution;
shape1.dim(2) = 30;
shape1.dim(3) = 40;
- ::neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+ ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32};
set.emplace(shape0, type);
set.emplace(shape1, type);
neurun::graph::verifier::DAGChecker verifier;
neurun::model::Shape shape(3);
- neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+ neurun::model::TypeInfo type{neurun::ir::DataType::INT32};
// Model Input/Output
auto input_operand = graph.addOperand(shape, type);
neurun::graph::Graph graph;
neurun::model::Shape shape{3};
- neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+ neurun::model::TypeInfo type{neurun::ir::DataType::INT32};
// Add Conv
using Graph = neurun::model::operation::Conv2D;
neurun::model::Shape shape{3};
- neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+ neurun::model::TypeInfo type{neurun::ir::DataType::INT32};
using Graph = neurun::model::operation::Concat;
neurun::graph::Graph graph;
::neurun::model::Shape shape{3};
- ::neurun::model::TypeInfo type{neurun::model::DataType::INT32};
+ ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32};
auto operand1 = graph.addOperand(shape, type);
auto operand2 = graph.addOperand(shape, type);