model::operation::SoftmaxNode::Param param;
const auto *options = op->builtin_options_as_SoftmaxOptions();
// Beta: positive scaling factor for the exponent, read from SoftmaxOptions
- model::Shape shape;
- model::TypeInfo type_info(neurun::model::DataType::FLOAT32);
- const float &beta = options->beta();
- const uint8_t *ptr = reinterpret_cast<const uint8_t *>(&beta);
- auto scale_index = createOperand<float>(ptr, shape, type_info);
- param.scale_index = scale_index;
+ param.beta = options->beta();
std::unique_ptr<model::Operation> new_op(
new model::operation::SoftmaxNode(inputs, outputs, param));
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::SoftmaxNode::Input::INPUT)};
- const auto scale_index{node.param().scale_index};
struct Param
{
model::OperandIndex output_index;
model::OperandIndex input_index;
- float scale;
+ float beta;
};
Param param;
param.output_index = output_index;
param.input_index = input_index;
- param.scale = _ctx.at(scale_index).asScalar<float>();
+ param.beta = node.param().beta;
auto tensors = _tensor_builder;
auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>();
- fn->configure(input_alloc->handle(), output_alloc->handle(), param.scale);
+ fn->configure(input_alloc->handle(), output_alloc->handle(), param.beta);
auto acl_fn = asAclFunction(std::move(fn));
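// Note (assumption about the targeted library version): in the Compute
// Library releases this backend builds against, CLSoftmaxLayer::configure
// accepts the scaling factor as its third argument (defaulting to 1.0f),
// so param.beta can be passed through unchanged; the same holds for the
// NESoftmaxLayer call below.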
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::SoftmaxNode::Input::INPUT)};
- const auto scale_index{node.param().scale_index};
struct Param
{
model::OperandIndex output_index;
model::OperandIndex input_index;
- float scale;
+ float beta;
};
Param param;
param.output_index = output_index;
param.input_index = input_index;
- param.scale = _ctx.at(scale_index).asScalar<float>();
+ param.beta = node.param().beta;
auto tensors = _tensor_builder;
auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>();
- fn->configure(input_alloc->handle(), output_alloc->handle(), param.scale);
+ fn->configure(input_alloc->handle(), output_alloc->handle(), param.beta);
auto acl_fn = asAclFunction(std::move(fn));
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::SoftmaxNode::Input::INPUT)};
- const auto scale_index{node.param().scale_index};
struct Param
{
::neurun::backend::cpu::kernel::Shape ofm_shape;
::neurun::backend::cpu::kernel::Shape ifm_shape;
- float scale;
+ float beta;
};
Param param;
param.ofm_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(output_index));
param.ifm_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(input_index));
- param.scale = _ctx.at(scale_index).asScalar<float>();
+ param.beta = node.param().beta;
auto tensors = _tensor_builder;
auto fn = nnfw::cpp14::make_unique<::neurun::backend::cpu::kernel::SoftMaxLayer>();
- fn->configure(input_alloc->buffer(), param.ifm_shape, param.scale, output_alloc->buffer(),
+ fn->configure(input_alloc->buffer(), param.ifm_shape, param.beta, output_alloc->buffer(),
param.ofm_shape);
builder.append(std::move(fn));
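// For reference, a minimal sketch of what a beta-scaled softmax computes per
// row, in the standard numerically stable form. Illustrative only, not the
// actual SoftMaxLayer implementation; the function name and the use of
// std::vector are assumptions made for the example.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> softmax_reference(const std::vector<float> &logits, float beta)
{
  const float max_logit = *std::max_element(logits.begin(), logits.end());
  std::vector<float> out(logits.size());
  float sum = 0.0f;
  for (std::size_t i = 0; i < logits.size(); ++i)
  {
    // beta scales the logits before exponentiation; beta == 1.0f is plain softmax
    out[i] = std::exp(beta * (logits[i] - max_logit));
    sum += out[i];
  }
  for (auto &v : out)
  {
    v /= sum;
  }
  return out;
}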
struct Param
{
- OperandIndex scale_index;
+ float beta;
};
public:
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(0)};
- const auto scale_index{node.param().scale_index};
UNUSED_RELEASE(output_index);
UNUSED_RELEASE(input_index);
assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
- assert(_ctx.at(scale_index).shape().rank() == 0);
}
void OperationValidator::visit(const model::operation::PermuteNode &node)
};
_map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
// Each input should be interpreted as follows:
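//
//  0 -> A 2-D or 4-D tensor, specifying the tensor to be softmaxed.
//  1 -> FLOAT32 value, specifying the positive scaling factor for the
//       exponent, beta.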
OperandIndexSequence inputs{init_param.inputs[0]};
OperandIndexSequence outputs{init_param.outputs[0]};
+ const auto beta_index = OperandIndex{init_param.inputs[1]};
+
operation::SoftmaxNode::Param param;
- param.scale_index = OperandIndex{init_param.inputs[1]};
+ param.beta = operands.at(beta_index).asScalar<float>();
return new operation::SoftmaxNode{inputs, outputs, param};
};
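// Standalone sanity sketch (hypothetical, not part of the change): reading a
// FLOAT32 scalar back from an operand-style byte buffer, mirroring what
// operands.at(beta_index).asScalar<float>() does above and what the removed
// loader code did by hand with reinterpret_cast.
#include <cassert>
#include <cstring>

int main()
{
  const float stored_beta = 0.5f;
  unsigned char raw[sizeof(float)];
  std::memcpy(raw, &stored_beta, sizeof(raw)); // operand backing store
  float beta = 0.0f;
  std::memcpy(&beta, raw, sizeof(beta)); // asScalar<float>()-style read
  assert(beta == 0.5f);
  return 0;
}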