[model_parser] Hide instruction counts by verbose option (#3487)
author Yongseop Kim / Motion Control Lab (SR) / Engineer / Samsung Electronics <yons.kim@samsung.com>
Wed, 7 Nov 2018 03:12:34 +0000 (12:12 +0900)
committer Hyeongseok Oh / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <hseok82.oh@samsung.com>
Wed, 7 Nov 2018 03:12:34 +0000 (12:12 +0900)
Hide instruction counts behind the verbose option. The -v/--verbose flag now
controls whether instruction counts are printed. The levels work as follows
(example invocations are shown below):
- Verbose level 0: prints only a summary of the tflite model
- Verbose level 1 (default): prints everything the previous default printed,
  except instruction counts
- Verbose level 2: prints everything, including instruction counts
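
Example invocations (a sketch only; "model.tflite" is a placeholder file name,
not part of this change):
  $ ./model_parser.py model.tflite         # level 1 (default): no instruction counts
  $ ./model_parser.py -v 0 model.tflite    # summary only
  $ ./model_parser.py -v 2 model.tflite    # instruction counts and cycle estimates included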

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
tools/tflitefile_tool/model_parser.py
tools/tflitefile_tool/model_printer.py [new file with mode: 0644]
tools/tflitefile_tool/operator_parser.py
tools/tflitefile_tool/operator_printer.py [new file with mode: 0644]
tools/tflitefile_tool/operator_wrapping.py
tools/tflitefile_tool/tensor_printer.py [new file with mode: 0644]
tools/tflitefile_tool/tensor_wrapping.py

index b90fd79..7c5971a 100755 (executable)
@@ -28,6 +28,7 @@ import tflite.Model
 import tflite.SubGraph
 import argparse
 from operator_parser import OperatorParser
+from model_printer import ModelPrinter
 from perf_predictor import PerfPredictor
 
 
@@ -37,7 +38,6 @@ class TFLiteModelFileParser(object):
         self.tflite_file = args.input_file
 
         # Set print level (0 ~ 2)
-        # TODO: print information based on level
         self.print_level = args.verbose
         if (args.verbose > 2):
             self.print_level = 2
@@ -72,6 +72,9 @@ class TFLiteModelFileParser(object):
                 for operator_index in args.operator:
                     self.print_operator_index.append(int(operator_index))
 
+    def PrintModel(self, model_name, op_parser):
+        ModelPrinter(self.print_level, op_parser, model_name).PrintAll()
+
     def main(self):
         # Generate Model: top structure of tflite model file
         buf = self.tflite_file.read()
@@ -86,19 +89,12 @@ class TFLiteModelFileParser(object):
             if (subgraph_index != 0):
                 model_name = "Model #" + str(subgraph_index)
 
-            print("[" + model_name + "]\n")
-
-            # Model inputs & outputs
-            model_inputs = tf_subgraph.InputsAsNumpy()
-            model_outputs = tf_subgraph.OutputsAsNumpy()
-
-            print(model_name + " input tensors: " + str(model_inputs))
-            print(model_name + " output tensors: " + str(model_outputs))
-
-            # Parse Operators and print all of operators
+            # Parse Operators
             op_parser = OperatorParser(tf_model, tf_subgraph, PerfPredictor())
             op_parser.Parse()
-            op_parser.PrintAll()
+
+            # Print all operators or the requested objects
+            self.PrintModel(model_name, op_parser)
 
 
 if __name__ == '__main__':
@@ -107,11 +103,7 @@ if __name__ == '__main__':
     arg_parser.add_argument(
         "input_file", type=argparse.FileType('rb'), help="tflite file to read")
     arg_parser.add_argument(
-        '-v',
-        '--verbose',
-        action='count',
-        default=0,
-        help="set print level (0~2, default: 0)")
+        '-v', '--verbose', type=int, default=1, help="set print level (0~2, default: 1)")
     arg_parser.add_argument(
         '-t', '--tensor', nargs='*', help="tensor ID to print information (default: all)")
     arg_parser.add_argument(
diff --git a/tools/tflitefile_tool/model_printer.py b/tools/tflitefile_tool/model_printer.py
new file mode 100644 (file)
index 0000000..019f68e
--- /dev/null
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from operator_printer import OperatorPrinter
+
+
+class ModelPrinter(object):
+    def __init__(self, verbose, op_parser, model_name):
+        self.verbose = verbose
+        self.op_parser = op_parser
+        self.model_name = model_name
+
+    def PrintAll(self):
+        self.PrintModelInfo()
+        self.PrintAllOperatorsInList()
+        self.PrintAllTypesInfo()
+
+    def PrintModelInfo(self):
+        print("[" + self.model_name + "]\n")
+        if self.verbose > 0:
+            model_inputs = self.op_parser.tf_subgraph.InputsAsNumpy()
+            model_outputs = self.op_parser.tf_subgraph.OutputsAsNumpy()
+            print(self.model_name + " input tensors: " + str(model_inputs))
+            print(self.model_name + " output tensors: " + str(model_outputs))
+        print('')
+
+    def PrintAllOperatorsInList(self):
+        if (self.verbose < 1):
+            return
+
+        for operator in self.op_parser.operators_in_list:
+            printer = OperatorPrinter(self.verbose, operator)
+            printer.PrintInfo(self.op_parser.perf_predictor)
+            print('')
+
+        print('')
+
+    def PrintAllTypesInfo(self):
+        print("Number of all operator types: {0}".format(
+            len(self.op_parser.operators_per_type)))
+
+        # total instruction count across all operator types, printed only at verbose level 2
+        total_instrs = 0
+
+        # (operator type name string, list of operators of that type)
+        for type_str, oper_list in self.op_parser.operators_per_type.items():
+            # number of occurrences of this operator type
+            occur = len(oper_list)
+
+            optype_info_str = "\t{type_str:38}: {occur:4}".format(
+                type_str=type_str, occur=occur)
+
+            if self.verbose == 2:
+                # can this operator type be computed?
+                can_compute = oper_list[0].operation.can_compute
+
+                # total instruction count over all operators of this type
+                if can_compute:
+                    instrs = sum(
+                        operator.operation.TotalInstrNum() for operator in oper_list)
+                    total_instrs = total_instrs + instrs
+                    instrs = "{:,}".format(instrs)
+                else:
+                    instrs = "???"
+
+                optype_info_str = optype_info_str + " \t (instrs: {instrs})".format(
+                    instrs=instrs)
+
+            print(optype_info_str)
+
+        summary_str = "{0:46}: {1:4}".format("Number of all operators",
+                                             len(self.op_parser.operators_in_list))
+        if self.verbose == 2:
+            total_instrs = "{:,}".format(total_instrs)
+            summary_str = summary_str + " \t (total instrs: {0})".format(total_instrs)
+
+        print(summary_str)
+        print('')
index f8eabc6..5b080b0 100755 (executable)
@@ -83,45 +83,3 @@ class OperatorParser(object):
         if opcode_str not in self.operators_per_type:
             self.operators_per_type[opcode_str] = list()
         self.operators_per_type[opcode_str].append(operator)
-
-    def PrintAll(self):
-        print('')
-        self.PrintAllOperatorsInList()
-        print('')
-        self.PrintAllTypesInfo()
-        print('')
-
-    def PrintAllOperatorsInList(self):
-        for operator in self.operators_in_list:
-            operator.PrintInfo(self.perf_predictor)
-            print('')
-
-    def PrintAllTypesInfo(self):
-        print("Number of all operator types: {0}".format(len(self.operators_per_type)))
-
-        # number of instructions of all operator types
-        total_instrs = 0
-
-        # (a string of the operator type, a list of operators which are the same operator type)
-        for type_str, oper_list in self.operators_per_type.items():
-            # this operator type can be computed?
-            can_compute = oper_list[0].operation.can_compute
-
-            # number of occurrence of this operator type
-            occur = len(oper_list)
-
-            # total number of instructions of the same operator types
-            if can_compute:
-                instrs = sum(operator.operation.TotalInstrNum() for operator in oper_list)
-                total_instrs = total_instrs + instrs
-                instrs = "{:,}".format(instrs)
-            else:
-                instrs = "???"
-
-            print("\t{type_str:38}: {occur:4} \t (instrs: {instrs})".format(
-                type_str=type_str, occur=occur, instrs=instrs))
-
-        total_instrs = "{:,}".format(total_instrs)
-        print("{0:46}: {1:4} \t (total instrs: {2})".format("Number of all operators",
-                                                            len(self.operators_in_list),
-                                                            total_instrs))
diff --git a/tools/tflitefile_tool/operator_printer.py b/tools/tflitefile_tool/operator_printer.py
new file mode 100644 (file)
index 0000000..779f6de
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from operator_wrapping import Operator
+from tensor_printer import TensorPrinter
+from perf_predictor import PerfPredictor
+
+
+def GetStrTensorIndex(tensors):
+    return_string = "["
+    for idx in range(len(tensors)):
+        if idx != 0:
+            return_string += ", "
+        return_string += str(tensors[idx].tensor_idx)
+    return_string += "]"
+    return return_string
+
+
+class OperatorPrinter(object):
+    def __init__(self, verbose, operator):
+        self.verbose = verbose
+        self.operator = operator
+
+    def PrintInfo(self, perf_predictor=None):
+        if (self.verbose < 1):
+            return
+
+        op_str = "Operator {0}: {1}".format(self.operator.operator_idx,
+                                            self.operator.opcode_str)
+
+        if self.verbose == 2:
+            # total instruction num
+            instrs = "{:,}".format(self.operator.operation.TotalInstrNum()
+                                   ) if self.operator.operation.can_compute else "???"
+
+            # total operation cycles
+            cycles = "{:,}".format(
+                (perf_predictor.PredictCycles(self.operator.operation))
+            ) if self.operator.operation.can_compute and perf_predictor is not None else "???"
+
+            op_str = op_str + " (instrs: {0}, cycls: {1})".format(instrs, cycles)
+
+        print(op_str)
+
+        print("\tFused Activation: " + self.operator.fused_activation)
+
+        print("\tInput Tensors" + GetStrTensorIndex(self.operator.inputs))
+        for tensor in self.operator.inputs:
+            TensorPrinter(self.verbose, tensor).PrintInfo("\t\t")
+        print("\tOutput Tensors" + GetStrTensorIndex(self.operator.outputs))
+        for tensor in self.operator.outputs:
+            TensorPrinter(self.verbose, tensor).PrintInfo("\t\t")
index 2b03d5e..55d9172 100755 (executable)
@@ -18,9 +18,7 @@ import tflite.Operator
 import tflite.OperatorCode
 import tflite.BuiltinOperator
 import tflite.ActivationFunctionType
-from tensor_wrapping import Tensor
 from operation import Operation
-from perf_predictor import PerfPredictor
 
 
 # Match enum value integer to name string
@@ -44,16 +42,6 @@ class EnumStrMaps():
     BuiltinOptions = BuildEnumClassStrMap(tflite.BuiltinOptions.BuiltinOptions())
 
 
-def GetStrTensorIndex(tensors):
-    return_string = "["
-    for idx in range(len(tensors)):
-        if idx != 0:
-            return_string += ", "
-        return_string += str(tensors[idx].tensor_idx)
-    return_string += "]"
-    return return_string
-
-
 def GetAttribute(o, *args):
     import functools
     return functools.reduce(getattr, args, o)
@@ -98,30 +86,10 @@ class Operator(object):
         self.opcode_str = opcode_str
         self.operation = Operation(self.tf_operator, self.opcode_str, self.inputs,
                                    self.outputs)
+        self.fused_activation = "NONE"
+        self.SetupFusedActivation()
 
-    def PrintInfo(self, perf_predictor=None):
-        # total instruction num
-        instrs = "{:,}".format(
-            self.operation.TotalInstrNum()) if self.operation.can_compute else "???"
-
-        # total operation cycles
-        cycles = "{:,}".format(
-            (perf_predictor.PredictCycles(self.operation)
-             )) if self.operation.can_compute and perf_predictor != None else "???"
-
-        print("Operator {0}: {1} (instrs: {2}, cycls: {3})".format(
-            self.operator_idx, self.opcode_str, instrs, cycles))
-
-        self.PrintOptionInfo()
-
-        print("\tInput Tensors" + GetStrTensorIndex(self.inputs))
-        for tensor in self.inputs:
-            tensor.PrintInfo("\t\t")
-        print("\tOutput Tensors" + GetStrTensorIndex(self.outputs))
-        for tensor in self.outputs:
-            tensor.PrintInfo("\t\t")
-
-    def PrintOptionInfo(self):
+    def SetupFusedActivation(self):
         # FIXME: workaround for ops such as custom
         try:
             options = OptionLoader.GetBuiltinOptions(
@@ -132,8 +100,7 @@ class Operator(object):
         # fused activation function
         try:
             activation_code = options.FusedActivationFunction()
-            fused_activation = EnumStrMaps.ActivationFunctionType[activation_code]
-            print("\tFused Activation: " + fused_activation)
+            self.fused_activation = EnumStrMaps.ActivationFunctionType[activation_code]
         except AttributeError:
             # This operator does not support FusedActivationFunction
             pass
diff --git a/tools/tflitefile_tool/tensor_printer.py b/tools/tflitefile_tool/tensor_printer.py
new file mode 100644 (file)
index 0000000..d57b40f
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tensor_wrapping import Tensor, TensorTypeList
+
+
+class TensorPrinter(object):
+    def __init__(self, verbose, tensor):
+        self.verbose = verbose
+        self.tensor = tensor
+
+    def PrintInfo(self, depth_str=""):
+        if (self.verbose < 1):
+            return
+
+        print_str = ""
+        if self.tensor.tensor_idx < 0:
+            print_str = "Tensor {0:4}".format(self.tensor.tensor_idx)
+        else:
+            buffer_idx = self.tensor.tf_tensor.Buffer()
+            isEmpty = "Filled"
+            if (self.tensor.tf_buffer.DataLength() == 0):
+                isEmpty = " Empty"
+            shape_str = self.GetShapeString()
+            type_name = TensorTypeList[self.tensor.tf_tensor.Type()]
+
+            shape_name = ""
+            if self.tensor.tf_tensor.Name() != 0:
+                shape_name = self.tensor.tf_tensor.Name()
+
+            print_str = "Tensor {0:4} : buffer {1:4} | {2} | {3:7} | Shape {4} ({5})".format(
+                self.tensor.tensor_idx, buffer_idx, isEmpty, type_name, shape_str,
+                shape_name)
+        print(depth_str + print_str)
+
+    def GetShapeString(self):
+        if self.tensor.tf_tensor.ShapeLength() == 0:
+            return "Scalar"
+        return_string = "["
+        for shape_idx in range(self.tensor.tf_tensor.ShapeLength()):
+            if (shape_idx != 0):
+                return_string += ", "
+            return_string += str(self.tensor.tf_tensor.Shape(shape_idx))
+        return_string += "]"
+        return return_string
index 1ee1e53..b0639bb 100755 (executable)
@@ -35,34 +35,3 @@ class Tensor(object):
         self.tensor_idx = tensor_idx
         self.tf_tensor = tf_tensor
         self.tf_buffer = tf_buffer
-
-    def PrintInfo(self, depth_str=""):
-        print_str = ""
-        if self.tensor_idx < 0:
-            print_str = "Tensor {0:4}".format(self.tensor_idx)
-        else:
-            buffer_idx = self.tf_tensor.Buffer()
-            isEmpty = "Filled"
-            if (self.tf_buffer.DataLength() == 0):
-                isEmpty = " Empty"
-            shape_str = self.GetShapeString()
-            type_name = TensorTypeList[self.tf_tensor.Type()]
-
-            shape_name = ""
-            if self.tf_tensor.Name() != 0:
-                shape_name = self.tf_tensor.Name()
-
-            print_str = "Tensor {0:4} : buffer {1:4} | {2} | {3:7} | Shape {4} ({5})".format(
-                self.tensor_idx, buffer_idx, isEmpty, type_name, shape_str, shape_name)
-        print(depth_str + print_str)
-
-    def GetShapeString(self):
-        if self.tf_tensor.ShapeLength() == 0:
-            return "Scalar"
-        return_string = "["
-        for shape_idx in range(self.tf_tensor.ShapeLength()):
-            if (shape_idx != 0):
-                return_string += ", "
-            return_string += str(self.tf_tensor.Shape(shape_idx))
-        return_string += "]"
-        return return_string