/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #include "arm_compute/runtime/TensorAllocator.h"
26 #include "arm_compute/core/Coordinates.h"
27 #include "arm_compute/core/Error.h"
28 #include "arm_compute/core/TensorInfo.h"
29 #include "arm_compute/runtime/MemoryGroup.h"
30 #include "arm_compute/runtime/MemoryRegion.h"
31 #include "support/ToolchainSupport.h"
35 using namespace arm_compute;
39 bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &child_info, const Coordinates &coords)
42 const TensorShape &parent_shape = parent_info.tensor_shape();
43 const TensorShape &child_shape = child_info.tensor_shape();
44 const size_t parent_dims = parent_info.num_dimensions();
45 const size_t child_dims = child_info.num_dimensions();
47 if(child_dims <= parent_dims)
49 for(size_t num_dimensions = child_dims; num_dimensions > 0; --num_dimensions)
51 const size_t child_dim_size = coords[num_dimensions - 1] + child_shape[num_dimensions - 1];
53 if((coords[num_dimensions - 1] < 0) || (child_dim_size > parent_shape[num_dimensions - 1]))
69 TensorAllocator::TensorAllocator(Tensor *owner)
70 : _associated_memory_group(nullptr), _memory(), _owner(owner)
74 TensorAllocator::~TensorAllocator()
76 info().set_is_resizable(true);
79 TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
80 : ITensorAllocator(std::move(o)),
81 _associated_memory_group(o._associated_memory_group),
82 _memory(std::move(o._memory)),
85 o._associated_memory_group = nullptr;
90 TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
94 _associated_memory_group = o._associated_memory_group;
95 o._associated_memory_group = nullptr;
97 _memory = std::move(o._memory);
103 ITensorAllocator::operator=(std::move(o));
108 void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo sub_info)
111 const TensorInfo parent_info = allocator.info();
113 // Check if coordinates and new shape are within the parent tensor
114 ARM_COMPUTE_ERROR_ON(!validate_subtensor_shape(parent_info, sub_info, coords));
115 ARM_COMPUTE_UNUSED(validate_subtensor_shape);
117 // Copy pointer to buffer
118 _memory = Memory(allocator._memory.region());
120 // Init tensor info with new dimensions
121 size_t total_size = parent_info.offset_element_in_bytes(coords) + sub_info.total_size() - sub_info.offset_first_element_in_bytes();
122 sub_info.init(sub_info.tensor_shape(), sub_info.format(), parent_info.strides_in_bytes(), parent_info.offset_element_in_bytes(coords), total_size);
128 uint8_t *TensorAllocator::data() const
130 ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
131 return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
134 void TensorAllocator::allocate()
136 ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
137 ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
139 if(_associated_memory_group == nullptr)
141 _memory = Memory(std::make_shared<MemoryRegion>(info().total_size()));
145 _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(_memory.region()->handle()), info().total_size());
146 _memory.region()->set_size(info().total_size());
148 info().set_is_resizable(false);
151 void TensorAllocator::free()
154 info().set_is_resizable(true);
157 arm_compute::Status TensorAllocator::import_memory(Memory memory)
159 ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
160 ARM_COMPUTE_RETURN_ERROR_ON(memory.region()->buffer() == nullptr);
161 ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
163 info().set_is_resizable(false);
168 void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
170 ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
171 ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
172 ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
173 ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
174 _associated_memory_group = associated_memory_group;
177 uint8_t *TensorAllocator::lock()
179 ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
180 return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
183 void TensorAllocator::unlock()