create(const std::string &name, Args &&... args)
{
auto op = new T(std::forward<Args>(args)...);
- op->setId(_lastNodeId++);
+ op->setId(_last_node_id++);
op->setName(name);
registerOp(op);
return op;
{
assert(inputs.size() == old_op->getNumInputs());
auto op = old_op->copyWithInputs(inputs);
- op->setId(_lastNodeId++);
+ op->setId(_last_node_id++);
op->setName(old_op->getName());
registerOp(op);
return op;
void registerOp(Operation *op);
std::unordered_set<Operation *> _ops;
- size_t _lastNodeId = 0;
+ size_t _last_node_id = 0;
// TODO Change these to unordered_sets.
std::vector<ops::InputOp *> _inputs;
std::vector<ops::OutputOp *> _outputs;
void visit(ops::TanhOp &op) override;
void visit(ops::TransposeOp &op) override;
- void writeDot(std::ostream &os) { dotBuilder.writeDot(os); };
+ void writeDot(std::ostream &os) { _dot_builder.writeDot(os); };
private:
- IrDotBuilder dotBuilder;
+ IrDotBuilder _dot_builder;
};
} // namespace mir
*/
/// Construct a scalar from a raw byte buffer.
/// @param data      Pointer to the raw value bytes (copied into the internal buffer).
/// @param dtype     Data type of the stored value.
/// @param data_size Number of bytes in @p data; must fit the internal buffer.
explicit Scalar(const char *data, DTYPE dtype, unsigned data_size)
{ // NOLINT(cppcoreguidelines-pro-type-member-init, hicpp-member-init)
  assert(data_size <= _max_scalar_length);
  _data_type = dtype;
  memcpy(_data, data, data_size);
}
/**
* @return Data type
*/
- DTYPE getDataType() { return _dataType; }
+ DTYPE getDataType() { return _data_type; }
/**
* @return Data size
*/
int getDataSize() const
{
- switch (_dataType)
+ switch (_data_type)
{
case DTYPE::UNKNOWN:
return -1;
}
private:
- static const unsigned int maxScalarLength = 8;
- DTYPE _dataType;
- char _data[maxScalarLength];
+ static const unsigned int _max_scalar_length = 8;
+ DTYPE _data_type;
+ char _data[_max_scalar_length];
};
} // namespace mir
}
private:
- explicit ShapeIter(Shape &_shape, int32_t pos) : _pos(pos), _shape(_shape)
+ explicit ShapeIter(Shape &shape, int32_t pos) : _pos(pos), _shape(shape)
{
- _index.resize(_shape.rank());
+ _index.resize(shape.rank());
_index.fill(0);
}
/// @return A region of elements starting at @p idx and running to the end of the
///         last dimension — the only dimension safe to process contiguously here.
ExternalRegion<T> getRegion(const Index &idx)
{
  auto last_dim = _shape.rank() - 1;
  auto base = reinterpret_cast<T *>(_proxy.at(idx));
  auto length = _shape.dim(last_dim) - idx.at(last_dim);
  return ExternalRegion<T>(base, length);
}
virtual ~TensorVariant() = default;
- char *at(const Index &idx) const { return _data.get() + getOffset(idx) * _elementSize; }
+ char *at(const Index &idx) const { return _data.get() + getOffset(idx) * _element_size; }
char *atOffset(int32_t offset) const
{
assert(offset >= 0 && offset < getShape().numElements());
- return _data.get() + offset * _elementSize;
+ return _data.get() + offset * _element_size;
}
size_t getOffset(const Index &idx) const
const Shape &getShape() const { return _shape; }
DTYPE getDataType() const { return _dtype; }
- size_t getElementSize() const { return _elementSize; }
+ size_t getElementSize() const { return _element_size; }
private:
DTYPE _dtype;
adt::small_vector<int_fast32_t, MAX_DIMENSION_COUNT> _strides;
Shape _shape;
- size_t _elementSize;
+ size_t _element_size;
};
} // namespace mir
public:
explicit IrDotBuilder() = default;
- void updateWithOp(const Operation *op, const DotIrNodeInfo &irNodeInfo);
+ void updateWithOp(const Operation *op, const DotIrNodeInfo &ir_node_info);
void writeDot(std::ostream &os);
private:
- void addNode(const Operation *op, const DotIrNodeInfo &irNode);
+ void addNode(const Operation *op, const DotIrNodeInfo &ir_node);
void addEdge(const Operation *op1, const Operation *op2);
std::stringstream dot;
DotIrNodeInfo() = default;
- DotIrNodeInfo &withType(const std::string &typeName, const std::string &nodeName);
- DotIrNodeInfo &withInShapes(Shapes &&inShapes);
- DotIrNodeInfo &withOutShapes(Shapes &&outShapes);
-
- DotIrNodeInfo &withStride(const Shape &strideShape);
- DotIrNodeInfo &withShape(const std::string &shapeName, const Shape &shape);
- DotIrNodeInfo &withPadType(PadType padType);
- DotIrNodeInfo &withPoolType(PoolType poolType);
+ DotIrNodeInfo &withType(const std::string &type_name, const std::string &node_name);
+ DotIrNodeInfo &withInShapes(Shapes &&in_shapes);
+ DotIrNodeInfo &withOutShapes(Shapes &&out_shapes);
+
+ DotIrNodeInfo &withStride(const Shape &stride_shape);
+ DotIrNodeInfo &withShape(const std::string &shape_name, const Shape &shape);
+ DotIrNodeInfo &withPadType(PadType pad_type);
+ DotIrNodeInfo &withPoolType(PoolType pool_type);
DotIrNodeInfo &withMisc(const std::string &name, Stringable val);
/**
std::string labelForPadAndPool() const;
std::string labelForNodeParams() const;
- void addPipeIfNeeded(std::stringstream &ss, bool needed, bool &needPipe) const;
+ void addPipeIfNeeded(std::stringstream &ss, bool needed, bool &need_pipe) const;
- std::string typeName;
- std::string nodeName;
+ std::string _type_name;
+ std::string _node_name;
- Shapes inShapes;
- Shapes outShapes;
+ Shapes _in_shapes;
+ Shapes _out_shapes;
- Shape kernelShape;
+ Shape _kernel_shape;
- Shape strideShape;
- std::vector<NamedShape> shapes;
- std::vector<MiscVal> miscVals;
+ Shape _stride_shape;
+ std::vector<NamedShape> _shapes;
+ std::vector<MiscVal> _misc_vals;
- bool hasPad = false;
- PadType padType = PadType::Valid;
+ bool _has_pad = false;
+ PadType _pad_type = PadType::Valid;
- bool hasPool = false;
- PoolType poolType = PoolType::MAX;
+ bool _has_pool = false;
+ PoolType _pool_type = PoolType::MAX;
};
template <typename T> DotIrNodeInfo::Stringable::Stringable(T val) : _val(std::to_string(val)) {}
class BatchNormOp : public Operation
{
public:
- BatchNormOp(Output *arg, float movingAvgFraction, float eps, bool spatial)
- : Operation(Type::batchNorm, {arg}), _movingAvgFraction(movingAvgFraction), _eps(eps),
+ BatchNormOp(Output *arg, float moving_avg_fraction, float eps, bool spatial)
+ : Operation(Type::batchNorm, {arg}), _moving_avg_fraction(moving_avg_fraction), _eps(eps),
_spatial(spatial)
{
// Infer output shape.
/// Clone this operation attached to new @p inputs; all attributes are preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new BatchNormOp(inputs[0], _moving_avg_fraction, _eps, _spatial);
}
/**
* @return Factor used in computing the running mean and variance.
* e.g., running_mean = running_mean * movingAvgFraction + mean * (1 - movingAvgFraction).
*/
- float getMovingAvgFraction() const { return _movingAvgFraction; }
+ float getMovingAvgFraction() const { return _moving_avg_fraction; }
/**
* @return If true, compute the mean and variance across all spatial elements If false, compute
bool getSpatial() const { return _spatial; }
private:
- float _movingAvgFraction;
+ float _moving_avg_fraction;
float _eps;
bool _spatial;
};
/// 2D convolution.
/// @param input          Input tensor.
/// @param kernel         Convolution kernel tensor.
/// @param strides        Strides of the convolution window.
/// @param padding_before Padding added before each dimension (taken by value and moved).
/// @param padding_after  Padding added after each dimension (taken by value and moved).
Conv2DOp(Output *input, Output *kernel, const Shape &strides, std::vector<int32_t> padding_before,
         std::vector<int32_t> padding_after)
    : Operation(Type::conv2D, {input, kernel}), _strides(strides),
      _padding_before(std::move(padding_before)), _padding_after(std::move(padding_after))
{
  inferOutputShapes();
}
/// Clone this operation attached to new @p inputs; strides and paddings are preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new Conv2DOp(inputs[0], inputs[1], _strides, _padding_before, _padding_after);
}
const Shape &getStrides() const { return _strides; }
- const std::vector<int32_t> &getPaddingBefore() const { return _paddingBefore; }
+ const std::vector<int32_t> &getPaddingBefore() const { return _padding_before; }
- const std::vector<int32_t> &getPaddingAfter() const { return _paddingAfter; }
+ const std::vector<int32_t> &getPaddingAfter() const { return _padding_after; }
private:
void inferOutputShapes();
Shape _strides;
- std::vector<int32_t> _paddingBefore;
- std::vector<int32_t> _paddingAfter;
+ std::vector<int32_t> _padding_before;
+ std::vector<int32_t> _padding_after;
};
} // namespace ops
/// Deconvolution with explicit paddings; the same @p paddings vector is applied
/// both before and after, and the padding type is set to Custom.
DeConv2DOp(Output *input, Output *kernel, const Shape &strides,
           const std::vector<int32_t> &paddings)
    : Operation(Type::deConv2D, {input, kernel}), _strides(strides),
      _padding_type(PaddingType::Custom), _padding_before(paddings), _padding_after(paddings)
{
  inferOutputShapes();
}
/// Deconvolution with a named padding scheme (must not be Custom);
/// the two padding vectors are sized for the two spatial dimensions.
DeConv2DOp(Output *input, Output *kernel, const Shape &strides, PaddingType padding_type)
    : Operation(Type::deConv2D, {input, kernel}), _strides(strides), _padding_type(padding_type),
      _padding_before(2), _padding_after(2)
{
  assert(_padding_type != PaddingType::Custom);
  inferOutputShapes();
}
/// Deconvolution with a named padding scheme (must not be Custom) and a fixed
/// output shape; paddings are inferred from the requested output shape.
DeConv2DOp(Output *input, Output *kernel, const Shape &strides, PaddingType padding_type,
           const Shape &output_shape)
    : Operation(Type::deConv2D, {input, kernel}), _strides(strides), _padding_type(padding_type),
      _padding_before(2), _padding_after(2)
{
  assert(_padding_type != PaddingType::Custom);
  setOutputShape(0, output_shape);
  inferPaddings();
}
// NOTE(review): the clone is built via the symmetric-paddings constructor, so it gets
// PaddingType::Custom with _padding_before == _padding_after == this->_padding_after.
// If this op had asymmetric paddings or a non-Custom padding type, that information
// is lost in the copy — confirm whether this is intended.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new DeConv2DOp(inputs[0], inputs[1], _strides, _padding_after);
}
const Shape &getStrides() const { return _strides; }
- PaddingType getPaddingType() const { return _paddingType; }
+ PaddingType getPaddingType() const { return _padding_type; }
- const std::vector<int32_t> &getPaddingBefore() const { return _paddingBefore; }
+ const std::vector<int32_t> &getPaddingBefore() const { return _padding_before; }
- const std::vector<int32_t> &getPaddingAfter() const { return _paddingAfter; }
+ const std::vector<int32_t> &getPaddingAfter() const { return _padding_after; }
private:
void inferOutputShapes();
void inferPaddings();
Shape _strides;
- PaddingType _paddingType;
- std::vector<int32_t> _paddingBefore;
- std::vector<int32_t> _paddingAfter;
+ PaddingType _padding_type;
+ std::vector<int32_t> _padding_before;
+ std::vector<int32_t> _padding_after;
};
} // namespace ops
/// Depthwise 2D convolution.
/// @param input          Input tensor.
/// @param kernel         Depthwise kernel tensor.
/// @param strides        Strides of the convolution window.
/// @param padding_before Padding added before each dimension (taken by value and moved).
/// @param padding_after  Padding added after each dimension (taken by value and moved).
DepthwiseConv2DOp(Output *input, Output *kernel, const Shape &strides,
                  std::vector<int32_t> padding_before, std::vector<int32_t> padding_after)
    : Operation(Type::depthwiseConv, {input, kernel}), _strides(strides),
      _padding_before(std::move(padding_before)), _padding_after(std::move(padding_after))
{
  inferOutputShapes();
}
/// Clone this operation attached to new @p inputs; strides and paddings are preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new DepthwiseConv2DOp(inputs[0], inputs[1], _strides, _padding_before, _padding_after);
}
const Shape &getStrides() const { return _strides; }
- const std::vector<int32_t> &getPaddingBefore() const { return _paddingBefore; }
+ const std::vector<int32_t> &getPaddingBefore() const { return _padding_before; }
- const std::vector<int32_t> &getPaddingAfter() const { return _paddingAfter; }
+ const std::vector<int32_t> &getPaddingAfter() const { return _padding_after; }
private:
void inferOutputShapes();
Shape _strides;
- std::vector<int32_t> _paddingBefore;
- std::vector<int32_t> _paddingAfter;
+ std::vector<int32_t> _padding_before;
+ std::vector<int32_t> _padding_after;
};
} // namespace ops
* @param num_inputs Number of inputs
*/
/// Element-wise operation over several inputs.
/// @param args    Input tensors.
/// @param op_type Operation applied element-wise.
ElementwiseOp(const std::vector<Output *> &args, OpType op_type)
    : Operation(Type::elementwise, args), _op_type(op_type), _needs_broadcast(false)
{
  inferOutputShapes();
}
/// Clone this operation attached to new @p inputs; the op type is preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new ElementwiseOp(inputs, _op_type);
}
- bool getBroadcast() const { return _needsBroadcast; }
+ bool getBroadcast() const { return _needs_broadcast; }
private:
void inferOutputShapes();
- OpType _opType;
- bool _needsBroadcast;
+ OpType _op_type;
+ bool _needs_broadcast;
public:
- OpType getOpType() const { return _opType; }
+ OpType getOpType() const { return _op_type; }
};
} // namespace ops
/**
* @brief Class for Pad operation in modelIR
* @param arg The input
 * @param num_dims Number of dimensions
* @param paddings Vector with pairs of paddings (left, right)
* @param scalar_value Constant value filling padded region
*/
- PadOp(Output *arg, int32_t numDims, std::vector<std::pair<int32_t, int32_t>> paddings,
+ PadOp(Output *arg, int32_t num_dims, std::vector<std::pair<int32_t, int32_t>> paddings,
const Scalar &scalar_value)
- : Operation(Type::pad, {arg}), _numDims(numDims), _paddings(std::move(paddings)),
- _scalarValue(scalar_value)
+ : Operation(Type::pad, {arg}), _num_dims(num_dims), _paddings(std::move(paddings)),
+ _scalar_value(scalar_value)
{
inferOutputShapes();
}
/// Clone this operation attached to new @p inputs; paddings and fill value are preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new PadOp(inputs[0], _num_dims, _paddings, _scalar_value);
}
/**
/**
* @return Number of dimensions
*/
- int getNumDim() const { return _numDims; }
+ int getNumDim() const { return _num_dims; }
/**
* @return Scalar value
*/
- Scalar getScalar() const { return _scalarValue; }
+ Scalar getScalar() const { return _scalar_value; }
private:
void inferOutputShapes();
std::vector<std::pair<int32_t, int32_t>> _paddings;
- int32_t _numDims;
- Scalar _scalarValue;
+ int32_t _num_dims;
+ Scalar _scalar_value;
};
} // namespace ops
/// Pooling operation.
/// @param arg            Input tensor.
/// @param pooling_type   Kind of pooling to perform.
/// @param window_shape   Shape of the pooling window.
/// @param strides        Strides of the pooling window.
/// @param padding_before Padding added before each dimension (taken by value and moved).
/// @param padding_after  Padding added after each dimension (taken by value and moved).
/// @param border_type    How elements outside the input are treated.
PoolOp(Output *arg, PoolingType pooling_type, const Shape &window_shape, const Shape &strides,
       std::vector<int32_t> padding_before, std::vector<int32_t> padding_after,
       BorderType border_type)
    : Operation(Type::pool, {arg}), _pooling_type(pooling_type), _window_shape(window_shape),
      _strides(strides), _padding_before(std::move(padding_before)),
      _padding_after(std::move(padding_after)), _border_type(border_type)
{
  inferOutputShapes();
}
/// Clone this operation attached to new @p inputs; all pooling attributes are preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new PoolOp(inputs[0], _pooling_type, _window_shape, _strides, _padding_before,
                    _padding_after, _border_type);
}
- BorderType getBorderType() const { return _borderType; }
+ BorderType getBorderType() const { return _border_type; }
- PoolingType getPoolingType() const { return _poolingType; }
+ PoolingType getPoolingType() const { return _pooling_type; }
- const Shape &getWindowShape() const { return _windowShape; }
+ const Shape &getWindowShape() const { return _window_shape; }
const Shape &getStrides() const { return _strides; }
- const std::vector<int32_t> &getPaddingBefore() const { return _paddingBefore; }
+ const std::vector<int32_t> &getPaddingBefore() const { return _padding_before; }
- const std::vector<int32_t> &getPaddingAfter() const { return _paddingAfter; }
+ const std::vector<int32_t> &getPaddingAfter() const { return _padding_after; }
private:
void inferOutputShapes();
- PoolingType _poolingType;
- Shape _windowShape;
+ PoolingType _pooling_type;
+ Shape _window_shape;
Shape _strides;
- std::vector<int32_t> _paddingBefore;
- std::vector<int32_t> _paddingAfter;
- BorderType _borderType;
+ std::vector<int32_t> _padding_before;
+ std::vector<int32_t> _padding_after;
+ BorderType _border_type;
};
} // namespace ops
* @param func_type function to reduce the tensor with (should be associative)
*/
ReduceOp(Output *arg, std::vector<int32_t> reduce_dims, bool keep_dims, FuncType func_type)
- : Operation(Type::reduce, {arg}), _reduceDims(std::move(reduce_dims)), _keepDims(keep_dims),
- _funcType(func_type)
+ : Operation(Type::reduce, {arg}), _reduce_dims(std::move(reduce_dims)), _keep_dims(keep_dims),
+ _func_type(func_type)
{
// Infer output shapes.
/// Clone this operation attached to new @p inputs; reduction attributes are preserved.
Operation *copyWithInputs(const std::vector<Output *> &inputs) override
{
  return new ReduceOp(inputs[0], _reduce_dims, _keep_dims, _func_type);
}
- const std::vector<int32_t> &getReductionDims() const { return _reduceDims; };
+ const std::vector<int32_t> &getReductionDims() const { return _reduce_dims; };
- bool getKeepDims() const { return _keepDims; };
+ bool getKeepDims() const { return _keep_dims; };
- FuncType getFuncType() const { return _funcType; };
+ FuncType getFuncType() const { return _func_type; };
private:
- std::vector<int32_t> _reduceDims;
- bool _keepDims;
- FuncType _funcType;
+ std::vector<int32_t> _reduce_dims;
+ bool _keep_dims;
+ FuncType _func_type;
};
} // namespace ops
public:
TransposeOp(Output *arg, std::vector<std::size_t> axis_order);
- const std::vector<std::size_t> &getAxisOrder() const { return _axisOrder; }
+ const std::vector<std::size_t> &getAxisOrder() const { return _axis_order; }
/// Clone this operation attached to a new input; the axis order is preserved.
Operation *copyWithInputs(const std::vector<Output *> &arg) override
{
  return new TransposeOp(arg[0], _axis_order);
}
private:
void inferOutputShapes();
- std::vector<std::size_t> _axisOrder;
+ std::vector<std::size_t> _axis_order;
};
} // namespace ops
Operation *dst_node = consumer->getNode();
if (known_ops.count(dst_node) == 0)
{
- bool allInputsResolved = true;
+ bool all_inputs_resolved = true;
for (auto &dst_input : dst_node->getInputs())
{
if (known_ops.count(dst_input.getProducer()->getNode()) == 0)
{
- allInputsResolved = false;
+ all_inputs_resolved = false;
}
}
- if (allInputsResolved)
+ if (all_inputs_resolved)
{
known_ops.insert(dst_node);
q.push_back(dst_node);
#include "mir/Index.h"
#include <algorithm>
-#include <cassert>
namespace mir
{
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::CappedReluOp &op)
.withOutShapes(getOutputShapes(op))
.withMisc("Cap", op.getCap());
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::ConcatOp &op)
.withOutShapes(getOutputShapes(op))
.withMisc("Axis", op.getAxis());
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::Conv2DOp &op)
.withShape("Padding before", Shape(op.getPaddingBefore()))
.withShape("Padding after", Shape(op.getPaddingAfter()));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::DepthwiseConv2DOp &op)
.withShape("Padding before", Shape(op.getPaddingBefore()))
.withShape("Padding after", Shape(op.getPaddingAfter()));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::FullyConnectedOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::GemmOp &op)
.withType("Gemm", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::SoftmaxOp &op)
.withOutShapes(getOutputShapes(op))
.withMisc("Axis", op.getAxis());
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::PoolOp &op)
.withShape("Padding before", Shape(op.getPaddingBefore()))
.withShape("Padding after", Shape(op.getPaddingAfter()));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::ReluOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::ReshapeOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::InputOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::ConstantOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::BatchNormOp &op)
.withMisc("Moving Average Fraction", op.getMovingAvgFraction())
.withMisc("Eps", op.getEps())
.withMisc("Spatial", op.getSpatial());
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::ScaleOp &op)
.withType("ScaleOp", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::SliceOp &op)
.withShape("Sizes", op.getSizes())
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::DropoutOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withMisc("DropRate", op.getRate());
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void IrDotDumper::visit(ops::DeConv2DOp &op)
.withPadType(op.getPaddingType())
.withStride(op.getStrides());
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::EluOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, nodeInfo);
+ _dot_builder.updateWithOp(&op, nodeInfo);
}
void mir::IrDotDumper::visit(ops::ElementwiseOp &op)
.withOutShapes(getOutputShapes(op))
.withMisc("Operation", op_types.at(op.getOpType()));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::SqueezeOp &op)
node_info.withMisc("SqueezeDim", dim);
}
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void mir::IrDotDumper::visit(ops::PadOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::SqrtOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::ReduceOp &op)
.withMisc("Keep Dims", op.getKeepDims())
.withMisc("OPType", types.at(op.getFuncType()));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::ResizeOp &op)
.withMisc("Mode", modes.at(op.getMode()));
// scale is only needed in Shape Inference
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::TransposeOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::GatherOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::SigmoidOp &op)
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(mir::ops::LeakyReluOp &op)
.withOutShapes(getOutputShapes(op))
.withMisc("alpha", op.getAlpha());
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
void IrDotDumper::visit(ops::OutputOp &op)
auto node_info =
DotIrNodeInfo().withType("OutputOp", op.getName()).withInShapes(getInputShapes(op));
- dotBuilder.updateWithOp(&op, node_info);
+ _dot_builder.updateWithOp(&op, node_info);
}
} // namespace mir
switch (dtype)
{
case DTYPE::FLOAT32:
- _elementSize = sizeof(float);
+ _element_size = sizeof(float);
break;
case DTYPE::FLOAT64:
- _elementSize = sizeof(double);
+ _element_size = sizeof(double);
break;
case DTYPE::INT32:
- _elementSize = sizeof(int32_t);
+ _element_size = sizeof(int32_t);
break;
case DTYPE::INT64:
- _elementSize = sizeof(int64_t);
+ _element_size = sizeof(int64_t);
break;
default:
assert(false);
}
- std::size_t data_size = _shape.numElements() * _elementSize;
+ std::size_t data_size = _shape.numElements() * _element_size;
_data.reset(new char[data_size], std::default_delete<char[]>());
int stride = 1;
TensorVariant::TensorVariant(DTYPE dtype, const Shape &shape, const void *data)
: TensorVariant(dtype, shape)
{
- std::size_t data_size = _shape.numElements() * _elementSize;
+ std::size_t data_size = _shape.numElements() * _element_size;
std::memcpy(_data.get(), data, data_size);
}
*/
TensorVariant::TensorVariant(const TensorVariant &t_old, const Shape &shape)
: _dtype(t_old._dtype), _data(t_old._data), _strides(static_cast<size_t>(shape.rank())),
- _shape(shape), _elementSize(t_old._elementSize)
+ _shape(shape), _element_size(t_old._element_size)
{
int axis_old = t_old._shape.rank() - 1;
for (int d = shape.rank() - 1; d >= 0; d--)
namespace mir
{
-void IrDotBuilder::updateWithOp(const Operation *op, const DotIrNodeInfo &irNodeInfo)
+void IrDotBuilder::updateWithOp(const Operation *op, const DotIrNodeInfo &ir_node_info)
{
- addNode(op, irNodeInfo);
+ addNode(op, ir_node_info);
for (auto &prev : op->getInputs())
{
addEdge(prev.getProducer()->getNode(), op);
os << "digraph D {" << std::endl << dot.str() << std::endl << "}" << std::endl;
}
-void IrDotBuilder::addNode(const Operation *op, const DotIrNodeInfo &irNode)
+void IrDotBuilder::addNode(const Operation *op, const DotIrNodeInfo &ir_node)
{
- dot << op->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl;
+ dot << op->getId() << " [shape=record label=\"" << ir_node.getLabel() << "\"];" << std::endl;
}
void IrDotBuilder::addEdge(const Operation *op1, const Operation *op2)
template <> DotIrNodeInfo::Stringable::Stringable(const char *val) : _val(val) {}
-DotIrNodeInfo &DotIrNodeInfo::withType(const std::string &typeName, const std::string &nodeName)
+DotIrNodeInfo &DotIrNodeInfo::withType(const std::string &type_name, const std::string &node_name)
{
- this->typeName = typeName;
- this->nodeName = nodeName;
+ this->_type_name = type_name;
+ this->_node_name = node_name;
return *this;
}
-DotIrNodeInfo &DotIrNodeInfo::withInShapes(DotIrNodeInfo::Shapes &&inShapes)
+DotIrNodeInfo &DotIrNodeInfo::withInShapes(DotIrNodeInfo::Shapes &&in_shapes)
{
- this->inShapes = inShapes;
+ this->_in_shapes = in_shapes;
return *this;
}
-DotIrNodeInfo &DotIrNodeInfo::withOutShapes(DotIrNodeInfo::Shapes &&outShapes)
+DotIrNodeInfo &DotIrNodeInfo::withOutShapes(DotIrNodeInfo::Shapes &&out_shapes)
{
- this->outShapes = outShapes;
+ this->_out_shapes = out_shapes;
return *this;
}
-DotIrNodeInfo &DotIrNodeInfo::withStride(const Shape &strideShape)
+DotIrNodeInfo &DotIrNodeInfo::withStride(const Shape &stride_shape)
{
- this->strideShape = strideShape;
+ this->_stride_shape = stride_shape;
return *this;
}
*/
DotIrNodeInfo &DotIrNodeInfo::withMisc(const std::string &name, Stringable val)
{
  _misc_vals.emplace_back(name, std::move(val));
  return *this;
}
-DotIrNodeInfo &DotIrNodeInfo::withShape(const std::string &shapeName, const Shape &shape)
+DotIrNodeInfo &DotIrNodeInfo::withShape(const std::string &shape_name, const Shape &shape)
{
- this->shapes.emplace_back(shapeName, shape);
+ this->_shapes.emplace_back(shape_name, shape);
return *this;
}
-DotIrNodeInfo &DotIrNodeInfo::withPadType(DotIrNodeInfo::PadType padType)
+DotIrNodeInfo &DotIrNodeInfo::withPadType(DotIrNodeInfo::PadType pad_type)
{
- this->padType = padType;
- this->hasPad = true;
+ this->_pad_type = pad_type;
+ this->_has_pad = true;
return *this;
}
-DotIrNodeInfo &DotIrNodeInfo::withPoolType(DotIrNodeInfo::PoolType poolType)
+DotIrNodeInfo &DotIrNodeInfo::withPoolType(DotIrNodeInfo::PoolType pool_type)
{
- this->poolType = poolType;
- this->hasPool = true;
+ this->_pool_type = pool_type;
+ this->_has_pool = true;
return *this;
}
ss << "{";
// Node type and name
- ss << (!typeName.empty() ? typeName : "TYPE_NOT_SET") << ": "
- << (!nodeName.empty() ? nodeName : "NAME_NOT_SET");
+ ss << (!_type_name.empty() ? _type_name : "TYPE_NOT_SET") << ": "
+ << (!_node_name.empty() ? _node_name : "NAME_NOT_SET");
- if (typeName.empty())
+ if (_type_name.empty())
{
std::cout << "Warning: Node type is not set for "
- << (nodeName.empty() ? "one of the nodes" : "node " + nodeName) << std::endl;
+ << (_node_name.empty() ? "one of the nodes" : "node " + _node_name) << std::endl;
}
ss << " | ";
{
std::stringstream ss;
- if (hasPad)
+ if (_has_pad)
{
ss << "{PadType: ";
- switch (padType)
+ switch (_pad_type)
{
case PadType::Valid:
ss << "VALID";
assert(false && "Unknown Padding type");
break;
}
- if (hasPool)
+ if (_has_pool)
ss << " | ";
else
ss << "}";
}
- if (hasPool)
+ if (_has_pool)
{
- if (!hasPad)
+ if (!_has_pad)
ss << "{";
- std::string poolTypeStr;
- switch (poolType)
+ std::string pool_type_str;
+ switch (_pool_type)
{
case PoolType::MAX:
- poolTypeStr = "MAX";
+ pool_type_str = "MAX";
break;
case PoolType::AVG:
- poolTypeStr = "AVG";
+ pool_type_str = "AVG";
break;
case PoolType::MIN:
- poolTypeStr = "MIN";
+ pool_type_str = "MIN";
break;
default:
assert(false && "Unknown PoolType");
}
- ss << "PoolType: " << poolTypeStr;
+ ss << "PoolType: " << pool_type_str;
ss << "}";
}
void DotIrNodeInfo::writeInShapesLabel(std::stringstream &ss) const
{
- if (inShapes.empty())
+ if (_in_shapes.empty())
ss << "IN_SHAPES_NOT_SET";
else
{
- for (Shapes::size_type i = 0; i < inShapes.size(); ++i)
+ for (Shapes::size_type i = 0; i < _in_shapes.size(); ++i)
{
if (i != 0)
ss << " | ";
- ss << "in" << i << ": " << inShapes[i];
+ ss << "in" << i << ": " << _in_shapes[i];
}
}
}
void DotIrNodeInfo::writeOutShapesLabel(std::stringstream &ss) const
{
- if (outShapes.empty())
+ if (_out_shapes.empty())
ss << "OUT_SHAPES_NOT_SET";
else
{
- for (Shapes::size_type i = 0; i < outShapes.size(); ++i)
+ for (Shapes::size_type i = 0; i < _out_shapes.size(); ++i)
{
if (i != 0)
ss << "| ";
- ss << "out" << i << ": " << outShapes[i];
+ ss << "out" << i << ": " << _out_shapes[i];
}
}
}
{
std::stringstream ss;
- bool needPipe = false;
- if (kernelShape.rank() != 0)
+ bool need_pipe = false;
+ if (_kernel_shape.rank() != 0)
{
- ss << "Kernel: " << kernelShape;
- needPipe = true;
+ ss << "Kernel: " << _kernel_shape;
+ need_pipe = true;
}
std::string label = labelForPadAndPool();
- addPipeIfNeeded(ss, !label.empty(), needPipe);
+ addPipeIfNeeded(ss, !label.empty(), need_pipe);
ss << label;
- addPipeIfNeeded(ss, !shapes.empty(), needPipe);
- for (Shapes::size_type i = 0; i < shapes.size(); ++i)
+ addPipeIfNeeded(ss, !_shapes.empty(), need_pipe);
+ for (Shapes::size_type i = 0; i < _shapes.size(); ++i)
{
if (i != 0)
ss << " | ";
- ss << shapes[i].first << ": " << shapes[i].second;
+ ss << _shapes[i].first << ": " << _shapes[i].second;
}
- if (strideShape.rank() != 0)
+ if (_stride_shape.rank() != 0)
{
- addPipeIfNeeded(ss, true, needPipe);
- ss << "Stride: " << strideShape;
+ addPipeIfNeeded(ss, true, need_pipe);
+ ss << "Stride: " << _stride_shape;
}
// misc scalar parameters (Cap, dropRate, etc..)
- addPipeIfNeeded(ss, !miscVals.empty(), needPipe);
- for (Shapes::size_type i = 0; i < miscVals.size(); ++i)
+ addPipeIfNeeded(ss, !_misc_vals.empty(), need_pipe);
+ for (Shapes::size_type i = 0; i < _misc_vals.size(); ++i)
{
if (i != 0)
ss << " | ";
- ss << miscVals[i].first << ": " << miscVals[i].second;
+ ss << _misc_vals[i].first << ": " << _misc_vals[i].second;
}
return ss.str();
}
-void DotIrNodeInfo::addPipeIfNeeded(std::stringstream &ss, bool needed, bool &needPipe) const
+void DotIrNodeInfo::addPipeIfNeeded(std::stringstream &ss, bool needed, bool &need_pipe) const
{
if (needed)
{
- if (needPipe)
+ if (need_pipe)
ss << " | ";
else
- needPipe = true;
+ need_pipe = true;
}
}
assert(kernel_shape.rank() == 4);
assert(kernel_shape.dim(3) == input_shape.dim(3));
assert(_strides.rank() == 2);
- assert(_paddingBefore.size() == 2);
- assert(_paddingAfter.size() == 2);
+ assert(_padding_before.size() == 2);
+ assert(_padding_after.size() == 2);
Shape output_shape(4);
// Height and width.
for (int i = 0; i < 2; i++)
{
- auto padded_input = input_shape.dim(1 + i) + _paddingBefore[i] + _paddingAfter[i];
+ auto padded_input = input_shape.dim(1 + i) + _padding_before[i] + _padding_after[i];
// out_size = ceil((in_size - kernel_size + 1) / stride) =
// (in_size - kernel_size + 1 + stride - 1) / stride =
// (in_size - kernel_size) / stride + 1
int pad = (input_shape.dim(d + 1) - 1) * _strides.dim(d) + kernel_shape.dim(d) -
output_shape.dim(d + 1);
- _paddingBefore[d] = pad / 2;
- _paddingAfter[d] = pad - _paddingBefore[d];
+ _padding_before[d] = pad / 2;
+ _padding_after[d] = pad - _padding_before[d];
}
}
output_shape.dim(3) = kernel_shape.dim(2);
// Height and width.
- switch (_paddingType)
+ switch (_padding_type)
{
case ops::PaddingType::Same:
for (int d = 0; d < 2; d++)
case ops::PaddingType::Custom:
for (int d = 0; d < 2; d++)
output_shape.dim(1 + d) = input_shape.dim(1 + d) * _strides.dim(d) + kernel_shape.dim(d) -
- _strides.dim(d) - (_paddingBefore.at(d) + _paddingAfter.at(d));
+ _strides.dim(d) - (_padding_before.at(d) + _padding_after.at(d));
break;
default:
assert(false && "invalid padding type");
assert(kernel_shape.rank() == 4);
assert(input_shape.dim(3) == kernel_shape.dim(2));
assert(_strides.rank() == 2);
- assert(_paddingBefore.size() == 2);
- assert(_paddingAfter.size() == 2);
+ assert(_padding_before.size() == 2);
+ assert(_padding_after.size() == 2);
Shape output_shape(4);
// Height and width.
for (int i = 0; i < 2; i++)
{
- auto padded_input = input_shape.dim(1 + i) + _paddingBefore[i] + _paddingAfter[i];
+ auto padded_input = input_shape.dim(1 + i) + _padding_before[i] + _padding_after[i];
// out_size = ceil((in_size - kernel_size + 1) / stride) =
// (in_size - kernel_size + 1 + stride - 1) / stride =
// (in_size - kernel_size) / stride + 1
for (size_t i = 0; i < getNumInputs(); i++)
{
const auto &current_shape = getInputShape(i);
- _needsBroadcast = _needsBroadcast || max_shape != current_shape; // check not equal
+ _needs_broadcast = _needs_broadcast || max_shape != current_shape; // check not equal
const int rank = current_shape.rank();
for (int axis = 0; axis < rank; axis++)
{
auto &input_shape = getInputShape(0);
assert(input_shape.rank() == 4);
- assert(_windowShape.rank() == 2);
+ assert(_window_shape.rank() == 2);
assert(_strides.rank() == 2);
- assert(_paddingBefore.size() == 2);
- assert(_paddingAfter.size() == 2);
+ assert(_padding_before.size() == 2);
+ assert(_padding_after.size() == 2);
Shape output_shape(4);
for (int i = 0; i < 2; i++)
{
- auto padded_input = input_shape.dim(1 + i) + _paddingBefore.at(i) + _paddingAfter.at(i);
+ auto padded_input = input_shape.dim(1 + i) + _padding_before.at(i) + _padding_after.at(i);
// out_size = ceil((in_size - window_size + 1) / stride) =
// (in_size - window_size + 1 + stride - 1) / stride =
// (in_size - window_size) / stride + 1
- output_shape.dim(1 + i) = (padded_input - _windowShape.dim(i)) / _strides.dim(i) + 1;
+ output_shape.dim(1 + i) = (padded_input - _window_shape.dim(i)) / _strides.dim(i) + 1;
}
for (int i = 0; i < output_shape.rank(); i++)
{
TransposeOp::TransposeOp(Output *arg, std::vector<std::size_t> axis_order)
- : Operation(Type::transpose, {arg}), _axisOrder(std::move(axis_order))
+ : Operation(Type::transpose, {arg}), _axis_order(std::move(axis_order))
{
- assert(_axisOrder.size() == static_cast<std::size_t>(getInputShape(0).rank()));
+ assert(_axis_order.size() == static_cast<std::size_t>(getInputShape(0).rank()));
inferOutputShapes();
}
{
auto &input_shape = getInputShape(0);
Shape output_shape(input_shape.rank());
- for (std::size_t i = 0; i < _axisOrder.size(); ++i)
+ for (std::size_t i = 0; i < _axis_order.size(); ++i)
output_shape.dim(static_cast<int32_t>(i)) =
- input_shape.dim(static_cast<int32_t>(_axisOrder.at(i)));
+ input_shape.dim(static_cast<int32_t>(_axis_order.at(i)));
setOutputShape(0, output_shape);
}
auto in2 = g->replaceWithInputNode(n2);
- std::vector<ops::InputOp *> expectedInputs{dynamic_cast<ops::InputOp *>(n1), in2};
- ASSERT_EQ(g->getInputs(), expectedInputs);
+ std::vector<ops::InputOp *> expected_inputs{dynamic_cast<ops::InputOp *>(n1), in2};
+ ASSERT_EQ(g->getInputs(), expected_inputs);
delete g;
}
}
TEST(Operation, ConcatAxisTest)
{
- Shape inShape{1, 2, 3};
+ Shape in_shape{1, 2, 3};
- ops::InputOp input1(inShape), input2(inShape);
+ ops::InputOp input1(in_shape), input2(in_shape);
ops::ConcatOp op_1({input1.getOutput(0), input2.getOutput(0)}, 1);
ASSERT_EQ(op_1.getAxis(), 1);