Anyway, we can calculate the total number of operations in this way. The same approach applies to pooling.
'''
+ @staticmethod
def ComputeOperationForConv2D(tf_operator, inputs, outputs):
# Count instructions for a Conv2D operator.
# Returns a 3-tuple (add_instr_num, mul_instr_num, nonlinear_instr_num).
# NOTE(review): this diff hunk is truncated — the assert's closing
# `.Conv2DOptions)` and the lines that define add_instr_num and
# mul_instr_num are not visible in this view; presumably they derive the
# counts from the filter/output tensor shapes — confirm against the full file.
assert (
tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
# Conv2D contributes no nonlinear instructions in this counter.
nonlinear_instr_num = 0
return (add_instr_num, mul_instr_num, nonlinear_instr_num)
- '''
- NOTE: Reference the comment 'NOTE' of ComputeOperationForConv2D
- '''
+ # NOTE: Reference the comment 'NOTE' of ComputeOperationForConv2D
+ @staticmethod
def ComputeOperationForPooling(tf_operator, inputs, outputs):
# Count instructions for a Pool2D (pooling) operator.
# Returns a 3-tuple (add_instr_num, mul_instr_num, nonlinear_instr_num).
assert (
tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
.Pool2DOptions)
# The diff renames with a 'dummy_' prefix: the value is fetched but not
# used in the lines visible in this hunk.
- input_tensor = inputs[0].tf_tensor
+ dummy_input_tensor = inputs[0].tf_tensor
output_tensor = outputs[0].tf_tensor
pool2d_options = tflite.Pool2DOptions.Pool2DOptions()
# NOTE(review): hunk truncated — the computation of add_instr_num and
# mul_instr_num (presumably from output_tensor and pool2d_options) is not
# visible here; confirm against the full file.
# Pooling contributes no nonlinear instructions in this counter.
nonlinear_instr_num = 0
return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+ @staticmethod
def ComputeOperationForSoftmax(tf_operator, inputs, outputs):
# Count instructions for a Softmax operator.
# Returns a 3-tuple (add_instr_num, mul_instr_num, nonlinear_instr_num).
assert (
tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
# NOTE(review): hunk truncated — the assert's closing (presumably
# `.SoftmaxOptions)`) is not visible in this view.
input_tensor = inputs[0].tf_tensor
# The diff renames with a 'dummy_' prefix: the batch size is read but not
# used in the lines visible in this hunk.
- batch_size = input_tensor.Shape(0)
+ dummy_batch_size = input_tensor.Shape(0)
input_dim = input_tensor.Shape(1)
# Softmax(x_i) = exp(x_i) / sum of exp(x)
# One exp() per element for the numerators plus one per element for the
# denominator sum gives 2 * input_dim nonlinear instructions.
nonlinear_instr_num = input_dim + input_dim # sum of exp(x) and exp(x_i)
# NOTE(review): hunk truncated — add_instr_num / mul_instr_num are not
# defined in the visible lines.
return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+ @staticmethod
def ComputeOperationForFullyConnected(tf_operator, inputs, outputs):
# Count instructions for a FullyConnected operator.
# Returns a 3-tuple (add_instr_num, mul_instr_num, nonlinear_instr_num).
# NOTE(review): hunk truncated — the assert's closing (presumably
# `.FullyConnectedOptions)`) and the lines defining add_instr_num and
# mul_instr_num are not visible in this view.
assert (
tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
# FullyConnected contributes no nonlinear instructions in this counter.
nonlinear_instr_num = 0
return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+ @staticmethod
def ComputeOperationForNothing(tf_operator, inputs, outputs):
add_instr_num = 0
mul_instr_num = 0
nonlinear_instr_num = 0
return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+ @staticmethod
def NYI_ComputeOperation(tf_operator, inputs, outputs):
pass