/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
18 #define __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
#include "Allocator.h"

#include <backend/IPortableTensor.h>
#include <ir/OperandInfo.h>

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <stdexcept>
33 class DynamicMemoryManager;
35 class Tensor : public IPortableTensor
42 Tensor(const ir::OperandInfo &info, const ir::Layout layout,
43 DynamicMemoryManager *dynamic_mem_mgr)
44 : IPortableTensor(info), _layout(layout), _buffer(nullptr), _num_references(0),
45 _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
51 // Only one of two method 'setBuffer' must be called once
54 * @brief Set the Buffer object. This method is called for static and non-const tensor
56 void setBuffer(uint8_t *buffer)
58 assert(_buffer == nullptr);
63 * @brief Set the Buffer object. This method is called for dynamic or const tensor
65 void setBuffer(const std::shared_ptr<Allocator> &alloc)
67 assert(_buffer == nullptr);
69 _buffer = alloc->base();
72 // This works just as setBuffer but it simply overwrite existing Allocator without nullptr check
73 void overwriteBuffer(const std::shared_ptr<Allocator> &alloc)
76 _buffer = alloc->base();
80 * @brief Mark this tensor does not have memory.
81 * Real memory deallocation should be done by caller.
90 uint8_t *buffer() const override { return _buffer; }
92 * @brief Get dimension by index
94 * @param index Index to get diemension
95 * @return size_t Dimension at index
96 * @note N : dimension(0)
101 size_t dimension(size_t index) const final override { return _info.shape().dim(index); }
102 size_t num_dimensions() const override { return _info.shape().rank(); }
103 size_t total_size() const override { return _info.total_size(); }
104 size_t calcOffset(const ir::Coordinates &coords) const override;
105 ir::Layout layout() const override { return _layout; }
106 ir::DataType data_type() const override { return _info.typeInfo().type(); }
107 float data_scale() const override { return _info.typeInfo().scale(); }
108 int32_t data_offset() const override { return _info.typeInfo().offset(); }
109 bool is_constant() const override { return _info.isConstant(); }
110 bool is_dynamic() const override { return _info.isDynamic(); }
111 void set_dynamic() override { _info.setDynamic(); }
112 bool applyShape(const ir::Shape &new_shape) override;
113 const ir::Sparsity *sparsity() const override { return _info.typeInfo().sparsity(); }
115 virtual void increase_ref()
117 assert(is_dynamic() ||
119 (_buffer != nullptr));
124 virtual void decrease_ref()
126 assert(_buffer != nullptr || _allocator != nullptr);
127 assert(_num_references > 0);
129 // constant tensor and dynamic tensor has _allocator
130 if (_num_references == 0)
132 if (_buffer != nullptr)
134 if (_allocator != nullptr)
136 _allocator->release();
137 _allocator = nullptr;
143 * @brief Reset reference count to zero and release data
145 virtual void reset_ref()
147 assert(_buffer != nullptr || _allocator != nullptr);
148 assert(_num_references > 0);
151 // Only constant tensor has allocator pointer
152 if (_buffer != nullptr)
156 _allocator->release();
157 _allocator = nullptr;
161 virtual int32_t num_references() { return _num_references; }
163 void setShape(const ir::Shape &new_shape) override;
168 int32_t _num_references;
169 DynamicMemoryManager *_dynamic_mem_mgr;
173 * @brief Memory allocator for dynamic tensor and const tensor
174 * Since maintaing _allocator and also _buffer makes confusion,
175 * we will mainly use _buffer (not _allocator.base()) for memory pointer in this code.
176 * _allocator(shared_ptr) is used to guarantee that we have valid _buffer.
178 std::shared_ptr<Allocator> _allocator;
182 * @brief Class that uses data from external memory that is not managed by a backend
183 * instead of allocating and copying the data. ExternalTensor's data pointer points to
184 * an address of memory such as where memory is already allocated, or mmapped area.
185 * This is meaning that ExternalTensor can take all of types' ir::Data.
186 * To support this, assume below things no padding, always NHWC layout,
187 * constant tensor and not dynamic.
189 class ExternalTensor : public Tensor
192 ExternalTensor() = delete;
193 virtual ~ExternalTensor();
196 ExternalTensor(const ir::OperandInfo &info, const ir::Layout layout)
197 : Tensor(info, layout, nullptr)
199 assert(_layout == ir::Layout::NHWC);
200 assert(_info.isConstant());
201 assert(_info.isDynamic() == false);
206 * @brief set Data to be shared from external so that this ExternalTensor will not be
207 * allocated on CPU backend
208 * @param[in] data data of Operand to be set
210 void setData(const std::shared_ptr<ir::Data> data)
212 assert(data != nullptr);
214 // Note. Some op such as cker::Conv could take buffer as nullptr.
215 // That's why _buffer also would be used
216 _buffer = const_cast<uint8_t *>(_data->base());
220 uint8_t *buffer() const override { return _buffer; }
222 bool is_constant() const override { return true; }
223 bool is_dynamic() const override { return false; }
224 void set_dynamic() override
226 throw std::runtime_error("This tensor does not support changing dynamic");
229 void setShape(const ir::Shape &) override
231 throw std::runtime_error("This tensor does not support changing shape");
234 void increase_ref() override { ++_num_references; }
236 void decrease_ref() override
238 assert(_data != nullptr);
239 assert(_num_references > 0);
241 if (_num_references == 0)
249 * @brief Reset reference count to zero and release data
251 void reset_ref() override
253 assert(_data != nullptr);
254 assert(_num_references > 0);
261 int32_t num_references() override { return _num_references; }
264 std::shared_ptr<const ir::Data> _data;
266 } // namespace cpu_common
267 } // namespace backend
270 #endif // __ONERT_BACKEND_CPU_COMMON_TENSOR_H__