/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
18 #define __ONERT_BACKEND_CPU_COMMON_TENSOR_H__
20 #include "Allocator.h"
22 #include <backend/IPortableTensor.h>
23 #include <ir/OperandInfo.h>
32 class Tensor : public IPortableTensor
38 Tensor(const ir::OperandInfo &info, const ir::Layout layout,
39 IDynamicTensorManager *dynamic_tensor_manager)
40 : _info(info), _layout(layout), _buffer(nullptr), _num_references(0),
41 _dynamic_tensor_manager(dynamic_tensor_manager), _allocator(nullptr)
47 // Only one of two method 'setBuffer' must be called once
50 * @brief Set the Buffer object. This method is called for static and non-const tensor
52 void setBuffer(uint8_t *buffer)
54 assert(_buffer == nullptr);
59 * @brief Set the Buffer object. This method is called for dynamic or const tensor
61 void setBuffer(const std::shared_ptr<Allocator> &alloc)
63 assert(_buffer == nullptr);
65 _buffer = alloc->base();
68 // This works just as setBuffer but it simply overwrite existing Allocator without nullptr check
69 void overwriteBuffer(const std::shared_ptr<Allocator> &alloc)
72 _buffer = alloc->base();
76 * @brief Mark this tensor does not have memory.
77 * Real memory deallocation should be done by caller.
86 uint8_t *buffer() const override { return _buffer; }
88 * @brief Get dimension by index
90 * @param index Index to get diemension
91 * @return size_t Dimension at index
92 * @note N : dimension(0)
97 size_t dimension(size_t index) const override { return _info.shape().dim(index); }
98 size_t num_dimensions() const override { return _info.shape().rank(); }
99 size_t total_size() const override { return _info.total_size(); }
100 size_t calcOffset(const ir::Coordinates &coords) const override;
101 ir::Layout layout() const override { return _layout; }
102 ir::DataType data_type() const override { return _info.typeInfo().type(); }
103 float data_scale() const override { return _info.typeInfo().scale(); }
104 int32_t data_offset() const override { return _info.typeInfo().offset(); }
105 bool is_constant() const override { return _info.isConstant(); }
106 bool is_dynamic() const override { return _info.isDynamic(); }
107 void set_dynamic() override { _info.setDynamic(); }
108 IDynamicTensorManager *dynamic_tensor_manager() override { return _dynamic_tensor_manager; }
109 bool is_sparse() const override { return _info.typeInfo().sparse(); }
110 virtual const uint16_t *w1_segments() const override { return _info.typeInfo().w1_segments(); }
111 virtual const uint16_t *w1_indices() const override { return _info.typeInfo().w1_indices(); }
113 virtual void increase_ref()
115 assert(is_dynamic() ||
117 (_buffer != nullptr));
121 virtual void decrease_ref()
123 assert(_buffer != nullptr || _allocator != nullptr);
124 assert(_num_references > 0);
126 // constant tensor and dynamic tensor has _allocator
127 if (_num_references == 0)
129 if (_buffer != nullptr)
131 if (_allocator != nullptr)
133 _allocator->release();
134 _allocator = nullptr;
139 void setShape(const ir::Shape &new_shape) override;
142 ir::OperandInfo _info;
145 int32_t _num_references;
146 IDynamicTensorManager *_dynamic_tensor_manager;
150 * @brief Memory allocator for dynamic tensor and const tensor
151 * Since maintaing _allocator and also _buffer makes confusion,
152 * we will mainly use _buffer (not _allocator.base()) for memory pointer in this code.
153 * _allocator(shared_ptr) is used to guarantee that we have valid _buffer.
155 std::shared_ptr<Allocator> _allocator;
158 } // namespace cpu_common
159 } // namespace backend
162 #endif // __ONERT_BACKEND_CPU_COMMON_TENSOR_H__