auto ofm_shape = _ctx.at(ofm_index).shape();
auto axis_shape = _ctx.at(axis_index).shape();
- assert(_ctx.at(axis_index).hasData());
+ assert(_ctx.at(axis_index).isConstant());
// Axis dimension is always 1.
assert(axis_shape.rank() == 1);
assert((ifm_shape.rank() - 1) == ofm_shape.rank());
* @brief Check whether this Object holds constant (compile-time) data
* @return @c true if Object is constant, otherwise @c false
*/
- bool hasData(void) const { return _data != nullptr; }
+ bool isConstant(void) const { return _data != nullptr; }
public:
template <typename T, typename... Args> void data(Args &&... args)
const auto &model_obj = _graph.operands().at(index);
// For only CONSTANTS
- if (!model_obj.hasData())
+ if (!model_obj.isConstant())
return;
auto type = model_obj.typeInfo().type();
assert(input_shape.rank() <= 4);
assert(output_shape.rank() <= input_shape.rank());
- assert(_ctx.at(axis_index).hasData());
+ assert(_ctx.at(axis_index).isConstant());
assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
assert(input_shape.rank() <= 4);
assert(output_shape.rank() <= input_shape.rank());
- assert(_ctx.at(axis_index).hasData());
+ assert(_ctx.at(axis_index).isConstant());
assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
UNUSED_RELEASE(ifm_shape);
UNUSED_RELEASE(ker_shape);
- assert(_ctx.at(padding_index).hasData() == true);
+ assert(_ctx.at(padding_index).isConstant() == true);
const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
assert(ifm_shape.rank() <= 4);
assert(indices_shape.rank() <= 3);
assert(ofm_shape.rank() <= 4);
- assert(_ctx.at(axis_index).hasData());
+ assert(_ctx.at(axis_index).isConstant());
assert(axis_shape.rank() == 0);
}
assert(ifm_shape.rank() <= 4);
assert(ofm_shape.rank() <= ifm_shape.rank());
- assert(_ctx.at(axis_index).hasData());
+ assert(_ctx.at(axis_index).isConstant());
assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
{
const auto activation_index = node.param().activation_index;
- if (!_model->operands().at(activation_index).hasData())
+ if (!_model->operands().at(activation_index).isConstant())
{
_nonConstParam = true;
}
auto axis_index = node.param().axis_index;
// To prepare concat elimination, axis should be constant
- if (!_ctx.at(axis_index).hasData())
+ if (!_ctx.at(axis_index).isConstant())
{
VERBOSE(SUBTENSOR) << "Cannot handle non-constant axis" << std::endl;
return;
}
else
{
- showing_cond = !object.hasData();
+ showing_cond = !object.isConstant();
}
- if (object.hasData() || _graph.getInputs().contains(index))
+ if (object.isConstant() || _graph.getInputs().contains(index))
{
showing_cond = showing_cond && (object.getUses().size() > 0);
}
// Allocate constant tensor
_model->operands.iterate([&](const model::operand::Index &ind,
const model::operand::Object &obj) {
- if (obj.hasData())
+ if (obj.isConstant())
{
VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. operand index:" << ind.value()
<< std::endl;
_env->model().operands.iterate(
[&](const model::operand::Index &ind, const model::operand::Object &obj) {
- if (obj.hasData())
+ if (obj.isConstant())
{
VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl;
{
// only valid_inputs
const auto &operand = _model->operands.at(input);
- if (operand.hasData())
+ if (operand.isConstant())
continue;
// This operand is input of operation, not weight or bias
{
// only valid_inputs
const auto &operand = _model->operands.at(input);
- if (operand.hasData())
+ if (operand.isConstant())
continue;
auto it = input_to_subgs.find(input);
// If a tensor is a constant, increase the use of the tensor.
// It makes the tensor not be dealloced.
- if (obj.hasData())
+ if (obj.isConstant())
{
constants.push_back(ind);
uses_map[ind]++;
void Object::appendDef(const ::neurun::model::OperationIndex &idx)
{
- assert(_usage != Usage::NOT_DEFINED && !hasData());
+ assert(_usage != Usage::NOT_DEFINED && !isConstant());
assert(_def.size() == 0);
_def.append(idx);
// @brief  Report whether the operand at @c index is produced by an operation:
//         it is neither a constant (no backing data) nor a model input.
// @param  index  raw NNAPI operand index, wrapped into an operand::Index below
// @return true iff the operand is an operation output
// NOTE(review): the '-' / '+' lines below are leftover diff markers from the
// hasData() -> isConstant() rename; only the '+' variant belongs in the
// final file. Resolve before compiling.
bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept
{
const neurun::model::operand::Index ind{index};
- return (!_model->operands.at(ind).hasData() && !_model->inputs.contains(ind));
+ return (!_model->operands.at(ind).isConstant() && !_model->inputs.contains(ind));
}
void ANeuralNetworksModel::setOptionalOperand(const neurun::model::operand::Index idx)