}
};
+/*! \brief Attributes for QNN binary operators */
+struct QnnBinaryOpAttrs : public tvm::AttrsNode<QnnBinaryOpAttrs> {
+ int32_t lhs_zero_point;
+ double lhs_scale;
+ int32_t rhs_zero_point;
+ double rhs_scale;
+ int32_t output_zero_point;
+ double output_scale;
+
+ TVM_DECLARE_ATTRS(QnnBinaryOpAttrs, "relay.attrs.QnnBinaryOpAttrs") {
+ TVM_ATTR_FIELD(lhs_zero_point)
+ .describe("The zero_point for the lhs input tensor of this op.");
+
+ TVM_ATTR_FIELD(lhs_scale)
+ .describe("The scale for the lhs input tensor of this op.");
+
+ TVM_ATTR_FIELD(rhs_zero_point)
+ .describe("The zero_point for the rhs input tensor of this op.");
+
+ TVM_ATTR_FIELD(rhs_scale)
+ .describe("The scale for the rhs input tensor of this op.");
+
+ TVM_ATTR_FIELD(output_zero_point)
+ .describe("The zero_point for the output tensor of this op.");
+
+ TVM_ATTR_FIELD(output_scale)
+ .describe("The scale for the output tensor of this op.");
+ }
+};
+
} // namespace qnn
} // namespace relay
} // namespace tvm
strides, padding, dilation,
groups, channels, kernel_size,
data_layout, kernel_layout, out_layout, out_dtype)
+
+
+def add(lhs, rhs, lhs_scale, lhs_zero_point, rhs_scale, rhs_zero_point, output_scale,
+ output_zero_point):
+ """Quantized addition with numpy-style broadcasting.
+
+ Parameters
+ ----------
+ lhs : relay.Expr
+ The left hand side quantized input data.
+
+ rhs : relay.Expr
+ The right hand side quantized input data.
+
+ lhs_scale : float
+ The scale of the lhs quantized expr.
+
+ lhs_zero_point : int
+ The zero point of the lhs quantized expr.
+
+ rhs_scale : float
+ The scale of the rhs quantized expr.
+
+ rhs_zero_point : int
+ The zero point of the rhs quantized expr.
+
+ output_scale : float
+ The scale of the output quantized expr.
+
+ output_zero_point : int
+ The zero point of the output quantized expr.
+
+ Returns
+ -------
+ result : relay.Expr
+ The computed result.
+
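+ Examples
+ --------
+ A minimal sketch; the qnn params below are illustrative values borrowed
+ from the unit tests, not defaults:
+
+ .. code-block:: python
+
+     x = relay.var("x", shape=(1, 4), dtype="uint8")
+     y = relay.var("y", shape=(1, 4), dtype="uint8")
+     z = relay.qnn.op.add(x, y,
+                          lhs_scale=0.125, lhs_zero_point=0,
+                          rhs_scale=0.125, rhs_zero_point=0,
+                          output_scale=0.125, output_zero_point=0)
+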
+ """
+ return _make.add(lhs, rhs,
+ lhs_scale, lhs_zero_point,
+ rhs_scale, rhs_zero_point,
+ output_scale, output_zero_point)
--- /dev/null
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file src/relay/qnn/op/add.cc
+ * \brief QNN add operator.
+ */
+#include <tvm/relay/analysis.h>
+#include <tvm/relay/op_attr_types.h>
+#include <tvm/relay/qnn/attrs.h>
+#include "../../pass/pattern_util.h"
+#include "../util.h"
+#include "op_common.h"
+
+namespace tvm {
+namespace relay {
+namespace qnn {
+
+/*!
+ * \brief Canonicalizes the QNN add op.
+ * \param attrs The QNN binary op attrs.
+ * \param new_args The new mutated args to the call node.
+ * \param arg_types The types of input and output.
+ * \return The sequence of Relay ops for the add op.
+ */
+Expr QnnAddCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
+ const Array<tvm::relay::Type>& arg_types) {
+ // Get the attrs.
+ CHECK_EQ(new_args.size(), 2);
+ auto lhs = new_args[0];
+ auto rhs = new_args[1];
+ const auto* binary_op_attrs = attrs.as<QnnBinaryOpAttrs>();
+ CHECK(binary_op_attrs != nullptr);
+ auto lhs_scale = binary_op_attrs->lhs_scale;
+ auto lhs_zero_point = binary_op_attrs->lhs_zero_point;
+ auto rhs_scale = binary_op_attrs->rhs_scale;
+ auto rhs_zero_point = binary_op_attrs->rhs_zero_point;
+ auto output_scale = binary_op_attrs->output_scale;
+ auto output_zero_point = binary_op_attrs->output_zero_point;
+
+ // Get the input dtype and shape.
+ CHECK_EQ(arg_types.size(), 3);
+ auto tensor_type = arg_types[0].as<TensorTypeNode>();
+ CHECK(tensor_type != nullptr);
+ auto input_dtype = tensor_type->dtype;
+ auto input_shape = tensor_type->shape;
+
+ // FIXME (anijain2305) - The lowering can be further optimized. Instead of inserting requantize
+ // at the start, we can insert requantize at the end if both input tensors have the same qnn
+ // params. In that case, we can first add the tensors, subtract the zero point, and requantize
+ // at the end. This can be done in the future.
+
+ // Since the input qnn params can differ from the output qnn params, we first requantize the
+ // input tensors to the output qnn params. Then we call relay.add on the requantized inputs.
+ // Adding the requantized inputs counts the output zero point twice, so we further subtract
+ // one zero point. The whole process can be represented with the following equations:
+ //
+ // scale_c * (Q_c - zp_c) = scale_a * (Q_a - zp_a) + scale_b * (Q_b - zp_b)
+ //
+ // After requantizing Q_a and Q_b, the equation becomes
+ // scale_c * (Q_c - zp_c) = scale_c * (Q_a' - zp_c) + scale_c * (Q_b' - zp_c)
+ // scale_c * (Q_c - zp_c) = scale_c * (Q_a' + Q_b' - zp_c - zp_c)
+ //
+ // Comparing the LHS and RHS, this results in
+ // Q_c = Q_a' + Q_b' - zp_c
+ // The add op is done in int32 precision.
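+ //
+ // As a worked example (numbers borrowed from the saturation unit test):
+ // with scale_a = 0.5, scale_b = 0.25, scale_c = 0.125 and all zero points
+ // 0, Q_a = 1 requantizes to Q_a' = 1 * (0.5 / 0.125) = 4 and Q_b = 64 to
+ // Q_b' = 64 * (0.25 / 0.125) = 128, giving Q_c = 4 + 128 - 0 = 132.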
+
+ // Requantize LHS if necessary.
+ auto requantized_lhs = lhs;
+ if (lhs_scale != output_scale || lhs_zero_point != output_zero_point) {
+ requantized_lhs = Requantize(lhs, input_shape, lhs_scale, lhs_zero_point, output_scale,
+ output_zero_point, Int(32));
+ } else {
+ requantized_lhs = Cast(requantized_lhs, Int(32));
+ }
+
+ // Requantize RHS if necessary.
+ auto requantized_rhs = rhs;
+ if (rhs_scale != output_scale || rhs_zero_point != output_zero_point) {
+ requantized_rhs = Requantize(rhs, input_shape, rhs_scale, rhs_zero_point, output_scale,
+ output_zero_point, Int(32));
+ } else {
+ requantized_rhs = Cast(requantized_rhs, Int(32));
+ }
+
+ auto output = Add(requantized_lhs, requantized_rhs);
+
+ // Subtract zero point.
+ if (output_zero_point != 0) {
+ auto output_zp = MakeConstantScalar(Int(32), output_zero_point);
+ output = Subtract(output, output_zp);
+ }
+
+ // Go back to lower precision.
+ auto q_min = GetQmin(input_dtype);
+ auto q_max = GetQmax(input_dtype);
+ output = Clip(output, q_min, q_max);
+ return Cast(output, input_dtype);
+}
+
+// QNN Addition operator.
+QNN_REGISTER_BINARY_OP("add")
+.describe("Elementwise add with with broadcasting for quantized tensors.")
+.set_support_level(11)
+.set_attr<FTVMLegalize>("FTVMQnnCanonicalize", QnnAddCanonicalize);
+
+} // namespace qnn
+} // namespace relay
+} // namespace tvm
--- /dev/null
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file src/relay/qnn/op/op_common.h
+ * \brief A set of utilities and common functionality for QNN ops.
+ */
+#ifndef TVM_RELAY_QNN_OP_OP_COMMON_H_
+#define TVM_RELAY_QNN_OP_OP_COMMON_H_
+
+#include <tvm/relay/expr.h>
+#include <tvm/relay/op.h>
+#include <tvm/relay/op_attr_types.h>
+#include <tvm/relay/qnn/attrs.h>
+#include <vector>
+#include "../../op/type_relations.h"
+
+namespace tvm {
+namespace relay {
+namespace qnn {
+
+/*! Quick helper macro
+ * - Expose a positional make function to construct the node.
+ * - Register op to the registry.
+ *
+ * We make the decision to expose only positional arguments. We do the
+ * rewrapping in the frontend to support language sugar such as keyword
+ * arguments and default values.
+ *
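+ * For instance, QNN_REGISTER_BINARY_OP("add") (as used in add.cc) exposes
+ * relay.qnn.op._make.add to the frontend and registers the "qnn.add" op
+ * with two tensor arguments and the Broadcast type relation.
+ *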
+ * \param OpName the name of the registry.
+ */
+#define QNN_REGISTER_BINARY_OP(OpName) \
+ TVM_REGISTER_API("relay.qnn.op._make." OpName) \
+ .set_body_typed<Expr(Expr, Expr, double, int32_t, double, int32_t, double, int32_t)>( \
+ [](Expr lhs, Expr rhs, double lhs_scale, int32_t lhs_zero_point, double rhs_scale, \
+ int32_t rhs_zero_point, double output_scale, int32_t output_zero_point) { \
+ auto attrs = make_node<QnnBinaryOpAttrs>(); \
+ attrs->lhs_scale = lhs_scale; \
+ attrs->lhs_zero_point = lhs_zero_point; \
+ attrs->rhs_scale = rhs_scale; \
+ attrs->rhs_zero_point = rhs_zero_point; \
+ attrs->output_scale = output_scale; \
+ attrs->output_zero_point = output_zero_point; \
+ static const Op& op = Op::Get("qnn." OpName); \
+ return CallNode::make(op, {lhs, rhs}, Attrs(attrs), {}); \
+ }); \
+ RELAY_REGISTER_OP("qnn." OpName) \
+ .set_num_inputs(2) \
+ .add_argument("lhs", "Tensor", "The left hand side quantized tensor.") \
+ .add_argument("rhs", "Tensor", "The right hand side quantized tensor.") \
+ .add_type_rel("Broadcast", BroadcastRel)
+
+} // namespace qnn
+} // namespace relay
+} // namespace tvm
+
+#endif // TVM_RELAY_QNN_OP_OP_COMMON_H_
--- /dev/null
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import tvm
+import numpy as np
+from tvm import relay
+
+def test_tflite_same_io_qnn_params():
+ data_dtype = 'uint8'
+
+ x = relay.var("x", shape=(1, 4), dtype=data_dtype)
+ y = relay.var("y", shape=(1, 4), dtype=data_dtype)
+ z = relay.qnn.op.add(lhs=x, rhs=y,
+ lhs_scale=0.00784314,
+ lhs_zero_point=127,
+ rhs_scale=0.00784314,
+ rhs_zero_point=127,
+ output_scale=0.00784314,
+ output_zero_point=127)
+
+ func = relay.Function([x, y], z)
+ mod = relay.Module.from_expr(func)
+ mod = relay.qnn.transform.CanonicalizeOps()(mod)
+ func = mod["main"]
+
+ x_datas = [np.array((140, 153, 165, 178)).reshape((1,4)),
+ np.array((25, 153, 178, 216)).reshape((1,4)),
+ np.array((25, 153, 216, 165)).reshape((1,4))]
+ y_datas = [np.array((204, 178, 165, 140)).reshape((1,4)),
+ np.array((204, 178, 191, 25)).reshape((1,4)),
+ np.array((204, 178, 25, 191)).reshape((1,4))]
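+ # With identical input and output qnn params, the lowering reduces to
+ # Q_c = Q_a + Q_b - zp; e.g. for the first lanes above, 140 + 204 - 127 = 217.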
+ golden_outputs = [np.array((217,204,203,191)).reshape((1, 4)),
+ np.array((102, 204, 242, 114)).reshape((1,4)),
+ np.array((102, 204, 114, 229)).reshape((1,4))]
+
+ for i in range(0, 3):
+ x_data = x_datas[i]
+ y_data = y_datas[i]
+ golden_output = golden_outputs[i]
+
+ intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
+ op_res = intrp.evaluate(func)(x_data, y_data)
+ np.testing.assert_equal(op_res.asnumpy(), golden_output)
+
+
+def test_tflite_different_io_qnn_params():
+ data_dtype = 'uint8'
+
+ x = relay.var("x", shape=(1, 4), dtype=data_dtype)
+ y = relay.var("y", shape=(1, 4), dtype=data_dtype)
+ z = relay.qnn.op.add(lhs=x, rhs=y,
+ lhs_scale=0.0156863,
+ lhs_zero_point=127,
+ rhs_scale=0.0117647,
+ rhs_zero_point=85,
+ output_scale=0.0235294,
+ output_zero_point=128)
+
+ func = relay.Function([x, y], z)
+ mod = relay.Module.from_expr(func)
+ mod = relay.qnn.transform.CanonicalizeOps()(mod)
+ func = mod["main"]
+
+ x_datas = [np.array((76, 140, 153, 172)).reshape((1,4)),
+ np.array((133, 140, 146, 153)).reshape((1,4)),
+ np.array((76, 140, 172, 146)).reshape((1,4))]
+ y_datas = [np.array((136, 119, 128, 17)).reshape((1,4)),
+ np.array((136, 119, 111, 94)).reshape((1,4)),
+ np.array((136, 119, 17, 128)).reshape((1,4))]
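+ # Each golden value dequantizes, adds, and requantizes; e.g. for the first
+ # lanes above, (76 - 127) * 0.0156863 + (136 - 85) * 0.0117647 ~= -0.2, and
+ # -0.2 / 0.0235294 + 128 ~= 119.5, which rounds to the golden 120.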
+ golden_outputs = [np.array((120, 154, 167, 124)).reshape((1, 4)),
+ np.array((158, 154, 154, 150)).reshape((1,4)),
+ np.array((120, 154, 124, 163)).reshape((1,4))]
+
+ for i in range(0, 3):
+ x_data = x_datas[i]
+ y_data = y_datas[i]
+ golden_output = golden_outputs[i]
+
+ intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
+ op_res = intrp.evaluate(func)(x_data, y_data)
+ np.testing.assert_equal(op_res.asnumpy(), golden_output)
+
+
+def test_saturation():
+ # Same params
+ data_dtype = 'uint8'
+ x = relay.var("x", shape=(1, 4), dtype=data_dtype)
+ y = relay.var("y", shape=(1, 4), dtype=data_dtype)
+ z = relay.qnn.op.add(lhs=x, rhs=y,
+ lhs_scale=0.125,
+ lhs_zero_point=0,
+ rhs_scale=0.125,
+ rhs_zero_point=0,
+ output_scale=0.125,
+ output_zero_point=0)
+
+ func = relay.Function([x, y], z)
+ mod = relay.Module.from_expr(func)
+ mod = relay.qnn.transform.CanonicalizeOps()(mod)
+ func = mod["main"]
+
+ x_data = np.array((255, 1, 1, 0)).reshape((1,4))
+ y_data = np.array((255, 255, 128, 0)).reshape((1,4))
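+ # With zero points of 0 and matching scales, Q_c = Q_a + Q_b, so
+ # 255 + 255 = 510 saturates to the uint8 qmax of 255.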
+ golden_output = np.array((255, 255, 129, 0)).reshape((1, 4))
+
+ intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
+ op_res = intrp.evaluate(func)(x_data, y_data)
+ np.testing.assert_equal(op_res.asnumpy(), golden_output)
+
+ # Same input qnn params, different output scale
+ z = relay.qnn.op.add(lhs=x, rhs=y,
+ lhs_scale=0.125,
+ lhs_zero_point=0,
+ rhs_scale=0.125,
+ rhs_zero_point=0,
+ output_scale=0.25,
+ output_zero_point=0)
+
+ func = relay.Function([x, y], z)
+ mod = relay.Module.from_expr(func)
+ mod = relay.qnn.transform.CanonicalizeOps()(mod)
+ func = mod["main"]
+
+ x_data = np.array((255, 1, 1, 0)).reshape((1,4))
+ y_data = np.array((255, 255, 127, 0)).reshape((1,4))
+ golden_output = np.array((255, 129, 65, 0)).reshape((1, 4))
+
+ intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
+ op_res = intrp.evaluate(func)(x_data, y_data)
+ np.testing.assert_equal(op_res.asnumpy(), golden_output)
+
+ # All params different
+ z = relay.qnn.op.add(lhs=x, rhs=y,
+ lhs_scale=0.5,
+ lhs_zero_point=0,
+ rhs_scale=0.25,
+ rhs_zero_point=0,
+ output_scale=0.125,
+ output_zero_point=0)
+
+ func = relay.Function([x, y], z)
+ mod = relay.Module.from_expr(func)
+ mod = relay.qnn.transform.CanonicalizeOps()(mod)
+ func = mod["main"]
+
+ x_data = np.array((255, 0, 1, 0)).reshape((1,4))
+ y_data = np.array((0, 128, 64, 0)).reshape((1,4))
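+ # e.g. lane 2: 1 * 0.5 + 64 * 0.25 = 16.5, and 16.5 / 0.125 = 132, matching
+ # the canonicalization walkthrough in add.cc.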
+ golden_output = np.array((255, 255, 132, 0)).reshape((1, 4))
+
+ intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
+ op_res = intrp.evaluate(func)(x_data, y_data)
+ np.testing.assert_equal(op_res.asnumpy(), golden_output)
+
+
+if __name__ == '__main__':
+ test_tflite_same_io_qnn_params()
+ test_tflite_different_io_qnn_params()
+ test_saturation()