Drop dead functions
author: Inki Dae <inki.dae@samsung.com>
Tue, 18 Feb 2020 04:20:01 +0000 (13:20 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Tue, 18 Feb 2020 04:21:06 +0000 (13:21 +0900)
AllocateTensorBuffer and ReleaseTensorBuffer functions aren't needed
anymore so drop them.

Change-Id: I0d3f0a7080bb3c668e903d8116569dca811e652a
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_armnn.cpp
src/inference_engine_armnn_private.h

index 6707507b27fe1df8d4ae0498966d1562795f9616..e1d62c18284c4f1088f2e9edba48eb2a08220ded 100644 (file)
@@ -82,45 +82,6 @@ inference_tensor_data_type_e InferenceARMNN::ConvertDataType(armnn::DataType typ
     return data_type;
 }
 
-void *InferenceARMNN::AllocateTensorBuffer(armnn::DataType type, int tensor_size)
-{
-    void *tensor_buffer = nullptr;
-
-    switch ((int)type) {
-    case ARMNN_DATA_TYPE_FLOAT32:
-        tensor_buffer = (void *)(new float[tensor_size]);
-        break;
-    case ARMNN_DATA_TYPE_UINT8:
-        tensor_buffer = (void *)(new unsigned char[tensor_size]);
-        break;
-    default:
-        LOGE("Invalid Input tensor type.");
-        return nullptr;
-    };
-
-    return tensor_buffer;
-}
-
-void InferenceARMNN::ReleaseTensorBuffer(armnn::DataType type, void *tensor_buffer)
-{
-    if (tensor_buffer == nullptr) {
-        LOGE("Invalid Input buffer.");
-        return;
-    }
-
-    switch ((int)type) {
-        case ARMNN_DATA_TYPE_FLOAT32:
-            delete[] (float *)(tensor_buffer);
-            break;
-        case ARMNN_DATA_TYPE_UINT8:
-            delete[] (unsigned char *)(tensor_buffer);
-            break;
-        default:
-            LOGE("Invalid Input tensor type.");
-            break;
-    };
-}
-
 int InferenceARMNN::SetTargetDevices(int types)
 {
     LOGI("ENTER");
index b251cf9adaf3ac52da3058c75f599deec8514b18..52a51b64912db581cf91ebadacdd92953bdcdc92 100644 (file)
@@ -73,9 +73,7 @@ public:
 private:
     int CreateTfLiteNetwork(std::string model_path);
     int CreateNetwork(std::vector<std::string> model_paths, inference_model_format_e model_format);
-    void *AllocateTensorBuffer(armnn::DataType type, int tensor_size);
     inference_tensor_data_type_e ConvertDataType(armnn::DataType type);
-    void ReleaseTensorBuffer(armnn::DataType type, void *tensor_buffer);
     int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
                             std::vector<inference_engine_tensor_buffer> &output_buffers);