Revise computation of operator instruction counts in model parser (#2060)
author  Yongseop Kim/동작제어Lab(SR)/Engineer/Samsung Electronics <yons.kim@samsung.com>
Tue, 24 Jul 2018 06:15:16 +0000 (15:15 +0900)
committer  Hyeongseok Oh/동작제어Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Tue, 24 Jul 2018 06:15:16 +0000 (15:15 +0900)
Revise the computation of operator instruction counts in the model parser.
To do this, introduce an Operation class that holds the per-instruction count
members, and an internal _OperationComputeMethod class that computes those
counts for each operator type.
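
For illustration, a minimal sketch of how the new class is meant to be used
(the tensor and operator variables here are placeholders; in this patch the
Operation is actually created inside Operator in operator_wrapping.py):

    op = Operation(tf_operator, "CONV_2D", input_tensors, output_tensors)
    if op.can_compute:
        print("instrs: {0}".format(op.TotalInstrNum()))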

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
tools/tflitefile_tool/operation.py [new file with mode: 0755]
tools/tflitefile_tool/operator_counter.py [deleted file]
tools/tflitefile_tool/operator_parser.py
tools/tflitefile_tool/operator_wrapping.py
tools/tflitefile_tool/perf_predictor.py
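
As a worked example of the counting rule described in the NOTE of operation.py
below, using assumed (hypothetical) tensor shapes:

    # assumed shapes: input [1,8,8,3], weight [16,3,3,3], output [1,8,8,16]
    kernel_ops = 3 * 3 * 3                           # kernel_w * kernel_h * input_channel = 27
    output_elems = 1 * 16 * 8 * 8                    # N * C * W * H = 1024
    mul_instr_num = output_elems * kernel_ops        # 27,648
    add_instr_num = output_elems * (kernel_ops + 1)  # 28,672; the +1 is the bias add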

diff --git a/tools/tflitefile_tool/operation.py b/tools/tflitefile_tool/operation.py
new file mode 100755 (executable)
index 0000000..77fc5db
--- /dev/null
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+import tflite.Conv2DOptions
+import tflite.Pool2DOptions
+import tflite.BuiltinOptions
+import tflite.Tensor
+from tensor_wrapping import Tensor
+import math
+'''
+NOTICE
+- _OperationComputeMethod below is an internal class; do not import it outside this file.
+- REF: https://stackoverflow.com/questions/551038/private-implementation-class-in-python
+'''
+
+
+class _OperationComputeMethod(object):
+    '''
+    NOTE: How do we count the operations of convolution (and also pooling)?
+
+    If we know the operations needed for one output element, we can calculate the total operations of the whole output.
+    For example, consider output Shape[3,3]
+    [ e11 e12 e13 ]
+    [ e21 e22 e23 ]
+    [ e31 e32 e33 ]
+    If we know the operations needed to calculate e11, then the total operations of the output (e11, e12, ... e33)
+    are simply the operations of e11 * 9 (the total number of elements).
+
+    So we only need to know how to calculate the operations of e11.
+    For this, think about how the convolution produces a single output element.
+    If input_channel is 1, only the kernel size (kernel_w and kernel_h) matters.
+    For example, consider input Shape[3,3] and kernel Shape[2,2]
+    [ i11 i12 i13 ]   [ k11 k12 ]   [ o11 o12 o13 ]
+    [ i21 i22 i23 ] * [ k21 k22 ] = [ o21 o22 o23 ]
+    [ i31 i32 i33 ]                 [ o31 o32 o33 ]
+
+    Conv operation: for o11, i11 * k11 + i21 * k21 + i12 * k12 + i22 * k22 = o11
+    In the convolution above, mul operations are done 4 times (== kernel_w * kernel_h)
+    and add operations are done 3 times (== kernel_w * kernel_h - 1).
+    The bias is added as well, and it is counted as one more add operation.
+
+    In this way we can calculate the total operations. The same approach applies to pooling.
+    '''
+
+    def ComputeOperationForConv2D(tf_operator, inputs, outputs):
+        assert (
+            tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+            .Conv2DOptions)
+
+        # NOTE: Assume that the conv2d operator always takes 3 tensors as inputs
+        #       and both width and height are the same.
+        # operator_inputs[]: [input_tensor, weight_tensor, bias_tensor]
+        # operator_outputs[]: [output_tensor]
+        # tflite's tensor shape: [N,H,W,C]
+        input_tensor = inputs[0].tf_tensor
+        weight_tensor = inputs[1].tf_tensor
+        output_tensor = outputs[0].tf_tensor
+
+        # kernel_ops (per output element) = kernel_w * kernel_h * input_channel
+        kernel_ops = (
+            weight_tensor.Shape(2) * weight_tensor.Shape(1) * input_tensor.Shape(3))
+
+        # number of output elements
+        #     = batch_size * output_channel * output_width * output_height
+        total_ops = (output_tensor.Shape(0) * output_tensor.Shape(3) *
+                     output_tensor.Shape(2) * output_tensor.Shape(1))
+
+        add_instr_num = (total_ops * (kernel_ops + 1))  # bias
+        mul_instr_num = (total_ops * (kernel_ops))
+        nonlinear_instr_num = 0
+        return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+    '''
+    NOTE: See the 'NOTE' comment of ComputeOperationForConv2D.
+    '''
+
+    def ComputeOperationForPooling(tf_operator, inputs, outputs):
+        assert (
+            tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+            .Pool2DOptions)
+
+        input_tensor = inputs[0].tf_tensor
+        output_tensor = outputs[0].tf_tensor
+
+        pool2d_options = tflite.Pool2DOptions.Pool2DOptions()
+        pool2d_options.Init(tf_operator.BuiltinOptions().Bytes,
+                            tf_operator.BuiltinOptions().Pos)
+
+        # kernel_ops = kernel_w * kernel_h
+        kernel_ops = (pool2d_options.FilterWidth() * pool2d_options.FilterHeight())
+
+        # number of output elements
+        #     = batch_size * output_channel * output_width * output_height
+        #       (kernel_ops = kernel_w * kernel_h is applied per element below)
+        total_ops = (output_tensor.Shape(0) * output_tensor.Shape(3) *
+                     output_tensor.Shape(2) * output_tensor.Shape(1))
+
+        add_instr_num = (total_ops * kernel_ops - 1)
+        mul_instr_num = (total_ops * kernel_ops)
+        nonlinear_instr_num = 0
+        return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+    def ComputeOperationForSoftmax(tf_operator, inputs, outputs):
+        assert (
+            tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+            .SoftmaxOptions)
+
+        input_tensor = inputs[0].tf_tensor
+
+        batch_size = input_tensor.Shape(0)
+        input_dim = input_tensor.Shape(1)
+
+        # Softmax(x_i) = exp(x_i) / sum of exp(x)
+        add_instr_num = input_dim - 1  # sum of exp(x)
+        mul_instr_num = input_dim  # /
+        nonlinear_instr_num = input_dim + input_dim  # sum of exp(x) and exp(x_i)
+        return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+    def ComputeOperationForFullyConnected(tf_operator, inputs, outputs):
+        assert (
+            tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+            .FullyConnectedOptions)
+
+        # NOTE: Assume that the fully_connected operator always takes 3 tensors as inputs
+        #       and its X tensor's shape is [1, 1, 1, input_dim] with
+        #       its output Y [1, output_dim]
+        input_tensor = inputs[0].tf_tensor
+        output_tensor = outputs[0].tf_tensor
+
+        # ops_per_element
+        #     = input_dim(multiplication) + input_dim-1(addition) + 1(bias)
+        # total_ops
+        #     = ops_per_elem * output_dim
+
+        add_instr_num = (input_tensor.Shape(3) * output_tensor.Shape(1))
+        mul_instr_num = (input_tensor.Shape(3) * output_tensor.Shape(1))
+        nonlinear_instr_num = 0
+        return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+    def ComputeOperationForNothing(tf_operator, inputs, outputs):
+        add_instr_num = 0
+        mul_instr_num = 0
+        nonlinear_instr_num = 0
+        return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+    def NYI_ComputeOperation(tf_operator, inputs, outputs):
+        pass
+
+    operation_to_method_map = {
+        # Inceptionv3
+        "CONV_2D": ComputeOperationForConv2D,
+        "AVERAGE_POOL_2D": ComputeOperationForPooling,
+        "MAX_POOL_2D": ComputeOperationForPooling,
+        "SOFTMAX": ComputeOperationForSoftmax,
+        "FULLY_CONNECTED": ComputeOperationForFullyConnected,
+        "CONCATENATION": ComputeOperationForNothing,
+        # ADAS
+        "TOPK_V2": NYI_ComputeOperation,
+        "SUB": NYI_ComputeOperation,
+        "STRIDED_SLICE": NYI_ComputeOperation,
+        "RESHAPE": NYI_ComputeOperation,
+        "GATHER": NYI_ComputeOperation,
+        "RESIZE_BILINEAR": NYI_ComputeOperation,
+        "CAST": NYI_ComputeOperation,
+        "ADD": NYI_ComputeOperation,
+        "MUL": NYI_ComputeOperation,
+        "DIV": NYI_ComputeOperation,
+        "CUSTOM(TensorFlowMax)": NYI_ComputeOperation,
+        "CUSTOM": NYI_ComputeOperation,
+    }
+
+
+class Operation(object):
+    def __init__(self, tf_operator, operator_str, inputs, outputs):
+        self.tf_operator = tf_operator
+        self.operator_str = operator_str
+        self.inputs = inputs
+        self.outputs = outputs
+        self.add_instr_num = 0
+        self.mul_instr_num = 0
+        self.nonlinear_instr_num = 0
+        self.can_compute = True
+        self.Compute()
+
+    def Compute(self):
+        comp_map = _OperationComputeMethod().operation_to_method_map
+        if self.operator_str not in comp_map:
+            self.can_compute = False
+            return
+
+        method = comp_map[self.operator_str]
+        if method.__name__ == _OperationComputeMethod().NYI_ComputeOperation.__name__:
+            self.can_compute = False
+            return
+
+        self.add_instr_num, self.mul_instr_num, self.nonlinear_instr_num = method(
+            self.tf_operator, self.inputs, self.outputs)
+
+    def TotalInstrNum(self):
+        return (self.add_instr_num + self.mul_instr_num + self.nonlinear_instr_num)
diff --git a/tools/tflitefile_tool/operator_counter.py b/tools/tflitefile_tool/operator_counter.py
deleted file mode 100755 (executable)
index cff0537..0000000
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-
-import tflite.Conv2DOptions
-import tflite.Pool2DOptions
-import tflite.BuiltinOptions
-import tflite.Tensor
-from tensor_wrapping import Tensor
-import math
-
-
-class OperationCount(object):
-    def __init__(self, add_count=0, mul_count=0, nonlinear_count=0):
-        self.add_count = add_count
-        self.mul_count = mul_count
-        self.nonlinear_count = nonlinear_count
-
-    def Increase(self, op_count):
-        self.IncreaseAddCount(op_count.GetAddCount())
-        self.IncreaseMulCount(op_count.GetMulCount())
-        self.IncreaseNonlinearCount(op_count.GetNonlinearCount())
-
-    def IncreaseAddCount(self, add_count):
-        self.add_count = self.add_count + add_count
-
-    def IncreaseMulCount(self, mul_count):
-        self.mul_count = self.mul_count + mul_count
-
-    def IncreaseNonlinearCount(self, nonlinear_count):
-        self.nonlinear_count = self.nonlinear_count + nonlinear_count
-
-    def GetAddCount(self):
-        return self.add_count
-
-    def GetMulCount(self):
-        return self.mul_count
-
-    def GetNonlinearCount(self):
-        return self.nonlinear_count
-
-    def TotalCount(self):
-        return self.add_count + self.mul_count + self.nonlinear_count
-
-    def IsSupportedOperationCount(self):
-        return True
-
-
-class UnsupportedOperationCount(OperationCount):
-    def __init__(self):
-        OperationCount.__init__(self)
-
-    def IsSupportedOperationCount(self):
-        return False
-
-
-# NOTE: How to count operations of convolution(and also pooling)?
-#
-# If we know operations of output's one element, we can calculate total output's operations.
-# For example, consider output Shape[3,3]
-# [ e11 e12 e13 ]
-# [ e21 e22 e23 ]
-# [ e31 e32 e33 ]
-# If we know operations for calculation of e11, we can know total operations of output(e11, e12, ... e33)
-# by operations of e11 * 9(total number of elements)
-#
-# So we only need to know how to calculate operations of e11. For this, just think how to conv operation to the output's element
-# If input_channel is 1, we can only think of kernel_size(kernel_w and kernel_h).
-# For example, consider input Shape[3,3] and kernel Shape[2,2]
-# [ i11 i12 i13 ]   [ k11 k12 ]   [ o11 o12 o13 ]
-# [ i21 i22 i23 ] * [ k21 k22 ] = [ o21 o22 o23 ]
-# [ i31 i32 i33 ]                 [ o31 o32 o33 ]
-#
-# Conv operation: for o11, i11 * k11 + i21 * k21 + i12 * k12 + i22 * k22 = o11
-# On above conv operation, mul operations are done at 4 times(== kernel_w * kernel_h)
-# and add operations are dont at 3 times(== kernel_w * kernel_h - 1)
-# and also, bias will be done and it will be counted on add operations.
-#
-# Anyway, we can calculate total operations on this way. This can apply to the way of pooling.
-def CountOpsConv2D(tf_operator, inputs, outputs):
-    assert (tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
-            .Conv2DOptions)
-    # NOTE: Assume that conv2d operator always take 3 tensors as inputs
-    #       and both width and height are the same.
-    # operator_inputs[]: [input_tensor, weight_tensor, bias_tensor]
-    # operator_outputs[]: [output_tensor]
-    # tflite's tensor shape: [N,H,W,C]
-    input_tensor = inputs[0].tf_tensor
-    weight_tensor = inputs[1].tf_tensor
-    output_tensor = outputs[0].tf_tensor
-
-    # kernel_ops = (kernel_w * kernel_h * input_channel * 2(multiply and add))
-    kernel_ops = (weight_tensor.Shape(2) * weight_tensor.Shape(1) * input_tensor.Shape(3))
-
-    # total ops
-    #     = batch_size * output_channel * output_width * output_height * kernel_ops
-    total_ops = (output_tensor.Shape(0) * output_tensor.Shape(3) * output_tensor.Shape(2)
-                 * output_tensor.Shape(1))
-
-    return OperationCount(
-        (total_ops * (kernel_ops + 1)),  # bias
-        (total_ops * (kernel_ops)))
-
-
-# NOTE: Reference the comment 'NOTE' of CountOpsConv2D
-def CountOpsPooling(tf_operator, inputs, outputs):
-    assert (tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
-            .Pool2DOptions)
-    input_tensor = inputs[0].tf_tensor
-    output_tensor = outputs[0].tf_tensor
-
-    pool2d_options = tflite.Pool2DOptions.Pool2DOptions()
-    pool2d_options.Init(tf_operator.BuiltinOptions().Bytes,
-                        tf_operator.BuiltinOptions().Pos)
-
-    # kernel_ops = kernel_w * kernel_h
-    kernel_ops = (pool2d_options.FilterWidth() * pool2d_options.FilterHeight())
-
-    # total ops
-    #     = batch_size * output_channel * output_width * output_height *
-    #       kernel_ops(kernel_w * kernel_h)
-    total_ops = (output_tensor.Shape(0) * output_tensor.Shape(3) * output_tensor.Shape(2)
-                 * output_tensor.Shape(1))
-
-    return OperationCount((total_ops * kernel_ops - 1), (total_ops * kernel_ops))
-
-
-def CountOpsSoftmax(tf_operator, inputs, outputs):
-    assert (tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
-            .SoftmaxOptions)
-
-    input_tensor = inputs[0].tf_tensor
-
-    batch_size = input_tensor.Shape(0)
-    input_dim = input_tensor.Shape(1)
-
-    # Softmax(x_i) = exp(x_i) / sum of exp(x)
-    add_count = input_dim - 1  # sum of exp(x)
-    mul_count = input_dim  # /
-    nonlinear_count = input_dim + input_dim  # sum of exp(x) and exp(x_i)
-
-    return OperationCount(add_count, mul_count, nonlinear_count)
-
-
-def CountOpsFullyConnected(tf_operator, inputs, outputs):
-    assert (tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
-            .FullyConnectedOptions)
-
-    # NOTE: Assume that fully_connected operator always take 3 tensors as inputs
-    #       and its X tensor's shape is [1, 1, 1, input_dim] with
-    #       its output Y [1, output_dim]
-    input_tensor = inputs[0].tf_tensor
-    output_tensor = outputs[0].tf_tensor
-
-    # ops_per_element
-    #     = input_dim(multiplication) + input_dim-1(addition) + 1(bias)
-    # total_ops
-    #     = ops_per_elem * output_dim
-    add_count = mul_count = input_tensor.Shape(3) * output_tensor.Shape(1)
-
-    return OperationCount(add_count, mul_count)
-
-
-def CountOpsNothing(tf_operator, inputs, outputs):
-    return OperationCount()
-
-
-def CountOpsUnsupported(tf_operator, inputs, outputs):
-    return UnsupportedOperationCount()
-
-
-# TODO: can make this as a class which has above method
-ops_counters = {
-    # Inceptionv3
-    "CONV_2D": CountOpsConv2D,
-    "AVERAGE_POOL_2D": CountOpsPooling,
-    "MAX_POOL_2D": CountOpsPooling,
-    "SOFTMAX": CountOpsSoftmax,
-    "FULLY_CONNECTED": CountOpsFullyConnected,
-    "CONCATENATION": CountOpsNothing,
-
-    # ADAS
-    "TOPK_V2": CountOpsUnsupported,
-    "SUB": CountOpsUnsupported,
-    "STRIDED_SLICE": CountOpsUnsupported,
-    "RESHAPE": CountOpsUnsupported,
-    "GATHER": CountOpsUnsupported,
-    "RESIZE_BILINEAR": CountOpsUnsupported,
-    "CAST": CountOpsUnsupported,
-    "ADD": CountOpsUnsupported,
-    "MUL": CountOpsUnsupported,
-    "DIV": CountOpsUnsupported,
-    "CUSTOM(TensorFlowMax)": CountOpsUnsupported,
-    "CUSTOM": CountOpsUnsupported,
-}
diff --git a/tools/tflitefile_tool/operator_parser.py b/tools/tflitefile_tool/operator_parser.py
index ac880c6..360e9c0 100755 (executable)
@@ -7,30 +7,7 @@ import tflite.OperatorCode
 import tflite.BuiltinOperator
 from operator_wrapping import Operator, SetBuiltinOpcodeStr, BuiltinOpcodeStrList
 from tensor_wrapping import Tensor, SetTensorTypeStr
-from operator_counter import OperationCount, UnsupportedOperationCount
-
-
-class TypesCounter(object):
-    def __init__(self, op):
-        self.type_count = 0
-        if op.IsSupportedOperationCount():
-            self.op_count = OperationCount()
-        else:
-            self.op_count = UnsupportedOperationCount()
-
-    def Update(self, op):
-        self.type_count = self.type_count + 1
-        if self.op_count.IsSupportedOperationCount():
-            self.op_count.Increase(op.GetOpCount())
-
-    def GetTypeCount(self):
-        return self.type_count
-
-    def GetOpCount(self):
-        return self.op_count
-
-    def IsSupportedOperationCount(self):
-        return self.op_count.IsSupportedOperationCount()
+from operation import Operation
 
 
 class OperatorParser(object):
@@ -38,8 +15,8 @@ class OperatorParser(object):
         self.tf_model = tf_model
         self.tf_subgraph = tf_subgraph
         self.perf_predictor = perf_predictor
-        self.operators = list()
-        self.op_types = dict()
+        self.operators_in_list = list()
+        self.operators_per_type = dict()
         # Built-in operator string table
         SetBuiltinOpcodeStr()
         # Tensor type string table
@@ -54,10 +31,7 @@ class OperatorParser(object):
 
             op = Operator(operator_idx, tf_operator, input_tensors, output_tensors,
                           opcode_str)
-            op.CountOperations()
-            self.operators.append(op)
-
-            self.CountOperator(op)
+            self.AppendOperator(op)
 
     def GetOpcodeStr(self, tf_operator):
         opcode_list_idx = tf_operator.OpcodeIndex()
@@ -70,12 +44,6 @@ class OperatorParser(object):
             opcode_str = opcode_str + "(" + custom_op_name + ")"
         return opcode_str
 
-    def CountOperator(self, op):
-        opcode_str = op.GetOpcodeStr()
-        if opcode_str not in self.op_types:
-            self.op_types[opcode_str] = TypesCounter(op)
-        self.op_types[opcode_str].Update(op)
-
     def GetInputTensors(self, tf_operator):
         operator_inputs = tf_operator.InputsAsNumpy()
         return self.GetTensors(operator_inputs)
@@ -93,35 +61,52 @@ class OperatorParser(object):
             return_list.append(Tensor(tensor_idx, tf_tensor, tf_buffer))
         return return_list
 
+    def AppendOperator(self, operator):
+        self.operators_in_list.append(operator)
+
+        opcode_str = operator.opcode_str
+        if opcode_str not in self.operators_per_type:
+            self.operators_per_type[opcode_str] = list()
+        self.operators_per_type[opcode_str].append(operator)
+
     def PrintAll(self):
         print('')
-        self.PrintAllOperators()
+        self.PrintAllOperatorsInList()
         print('')
-        self.PrintAllOperatorTypes()
+        self.PrintAllTypesInfo()
         print('')
 
-    def PrintAllOperators(self):
-        for operator in self.operators:
+    def PrintAllOperatorsInList(self):
+        for operator in self.operators_in_list:
             operator.PrintInfo(self.perf_predictor)
             print('')
 
-    def PrintAllOperatorTypes(self):
-        print("Number of operator types: {0}".format(len(self.op_types)))
+    def PrintAllTypesInfo(self):
+        print("Number of all operator types: {0}".format(len(self.operators_per_type)))
+
+        # number of instructions of all operator types
+        total_instrs = 0
 
-        total_opstr_count = 0
-        total_op_count = 0
-        for opstr, type_counter in self.op_types.items():
-            supported = type_counter.IsSupportedOperationCount()
+        # (operator type string, list of operators of that type)
+        for type_str, oper_list in self.operators_per_type.items():
+            # can this operator type be computed?
+            can_compute = oper_list[0].operation.can_compute
 
-            opstr_count = type_counter.GetTypeCount()
-            op_count = type_counter.GetOpCount().TotalCount()
+            # number of occurrences of this operator type
+            occur = len(oper_list)
 
-            print("\t{0:38}: {1:4} \t (total_ops: {2})".format(
-                opstr, opstr_count, "{:,}".format(op_count) if supported else "???"))
+            # total number of instructions for operators of this type
+            if can_compute:
+                instrs = sum(operator.operation.TotalInstrNum() for operator in oper_list)
+                total_instrs = total_instrs + instrs
+                instrs = "{:,}".format(instrs)
+            else:
+                instrs = "???"
 
-            total_opstr_count = total_opstr_count + opstr_count
-            total_op_count = (total_op_count + op_count) if supported else total_op_count
+            print("\t{type_str:38}: {occur:4} \t (instrs: {instrs})".format(
+                type_str=type_str, occur=occur, instrs=instrs))
 
-        total_op_count = "{:,}".format(total_op_count)
-        print("{0:46}: {1:4} \t (total_ops: {2})".format(
-            "Total Number of operators", total_opstr_count, total_op_count))
+        total_instrs = "{:,}".format(total_instrs)
+        print("{0:46}: {1:4} \t (total instrs: {2})".format("Number of all operators",
+                                                            len(self.operators_in_list),
+                                                            total_instrs))
diff --git a/tools/tflitefile_tool/operator_wrapping.py b/tools/tflitefile_tool/operator_wrapping.py
index b7f5eb9..6802daa 100755 (executable)
@@ -4,7 +4,7 @@ import tflite.Operator
 import tflite.OperatorCode
 import tflite.BuiltinOperator
 from tensor_wrapping import Tensor
-from operator_counter import OperationCount, UnsupportedOperationCount, ops_counters
+from operation import Operation
 from perf_predictor import PerfPredictor
 
 BuiltinOpcodeStrList = {}
@@ -86,20 +86,21 @@ class Operator(object):
         self.inputs = input_tensors
         self.outputs = output_tensors
         self.opcode_str = opcode_str
-        self.op_count = None  # OperationCount
+        self.operation = Operation(self.tf_operator, self.opcode_str, self.inputs,
+                                   self.outputs)
 
     def PrintInfo(self, perf_predictor=None):
-        supported = self.IsSupportedOperationCount()
+        # total instruction num
+        instrs = "{:,}".format(
+            self.operation.TotalInstrNum()) if self.operation.can_compute else "???"
 
-        # total op counts
-        counts = "{:,}".format(self.op_count.TotalCount()) if supported else "???"
+        # total operation cycles
+        cycles = "{:,}".format(
+            (perf_predictor.PredictCycles(self.operation)
+             )) if self.operation.can_compute and perf_predictor != None else "???"
 
-        # total op cycles
-        cycles = "{:,}".format((perf_predictor.PredictCycles(
-            self.op_count))) if supported and perf_predictor != None else "???"
-
-        print("Operator {0}: {1} (ops: {2}, cycls: {3})".format(
-            self.operator_idx, self.opcode_str, counts, cycles))
+        print("Operator {0}: {1} (instrs: {2}, cycls: {3})".format(
+            self.operator_idx, self.opcode_str, instrs, cycles))
 
         self.PrintOptionInfo()
 
@@ -111,7 +112,11 @@ class Operator(object):
             tensor.PrintInfo("\t\t")
 
     def PrintOptionInfo(self):
-        options = GetBuiltinOptions(self.tf_operator)
+        # FIXME: workaround for operators, such as custom ops, that GetBuiltinOptions cannot handle
+        try:
+            options = GetBuiltinOptions(self.tf_operator)
+        except KeyError:
+            return
 
         # fused activation function
         try:
@@ -122,21 +127,3 @@ class Operator(object):
         except AttributeError:
             # This operator does not support FusedActivationFunction
             pass
-
-    def CountOperations(self):
-        opcode_str = self.opcode_str
-        # FIXME: if there would be a class for ops_counters, we can delete this
-        if not opcode_str in ops_counters:
-            self.op_count = UnsupportedOperationCount()
-        else:
-            self.op_count = ops_counters[opcode_str](self.tf_operator, self.inputs,
-                                                     self.outputs)
-
-    def GetOpcodeStr(self):
-        return self.opcode_str
-
-    def GetOpCount(self):
-        return self.op_count
-
-    def IsSupportedOperationCount(self):
-        return self.op_count.IsSupportedOperationCount()
diff --git a/tools/tflitefile_tool/perf_predictor.py b/tools/tflitefile_tool/perf_predictor.py
index 49df993..8880c8e 100755 (executable)
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-from operator_counter import OperationCount
+from operation import Operation
 
 
 class PerfPredictor(object):
@@ -9,7 +9,7 @@ class PerfPredictor(object):
         self.mul_cycle = mul_cycle
         self.nonlinear_cycle = nonlinear_cycle
 
-    def PredictCycles(self, op_count):
-        return (op_count.GetAddCount() * self.add_cycle +
-                op_count.GetMulCount() * self.mul_cycle +
-                op_count.GetNonlinearCount() * self.nonlinear_cycle)
+    def PredictCycles(self, operation):
+        return (operation.add_instr_num * self.add_cycle +
+                operation.mul_instr_num * self.mul_cycle +
+                operation.nonlinear_instr_num * self.nonlinear_cycle)
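
A minimal usage sketch for the predictor, assuming hypothetical per-instruction
cycle costs (the actual defaults live in PerfPredictor's constructor):

    predictor = PerfPredictor(add_cycle=1, mul_cycle=2, nonlinear_cycle=4)  # assumed costs
    cycles = predictor.PredictCycles(operation)  # 'operation' is an Operation instance
    # == add_instr_num * 1 + mul_instr_num * 2 + nonlinear_instr_num * 4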