/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "backend/basic/Tensor.h"
19 #include "ir/DataType.h"
20 #include "backend/basic/MemoryManager.h"
31 size_t Tensor::calcOffset(const ir::Coordinates &coords) const
33 auto shape = getShape();
34 size_t rank = shape.rank();
35 rank = rank == 0 ? 1 : rank;
37 for (size_t i = 0; i < rank; ++i)
39 auto dim = shape.rank() == 0 ? 1 : shape.dim(i);
40 offset = offset * dim + coords[i];
42 offset *= sizeOfDataType(data_type());
46 void Tensor::setShape(const ir::Shape &new_shape) { _info.shape(new_shape); }
48 bool Tensor::applyShape(const ir::Shape &new_shape)
50 bool previously_dynamic = is_dynamic();
52 auto allocTensorMem = [&]() {
53 auto capacity = total_size();
54 auto alloc = _dynamic_mem_mgr->allocate(this, capacity);
58 if (!previously_dynamic || buffer() == nullptr)
60 // Always set shape - when buffer with same size was already allocated, shape could differ
67 auto previous_size = total_size();
68 auto new_size = new_shape.num_elements() * ir::sizeOfDataType(data_type());
69 if (previous_size != new_size)
71 _dynamic_mem_mgr->deallocate(this);
78 { // when buffer with same size was already allocated, shape could differ
85 ir::Shape Tensor::getShape() const { return _info.shape(); }
87 void Tensor::deallocBuffer()
95 _dynamic_mem_mgr->deallocate(this);
101 } // namespace backend
// Deliberately defined out-of-line (not `= default` in the header): on the
// Android NDK, `dynamic_cast` can fail across shared-library boundaries when a
// class has no "key function". Defining the destructor in this translation
// unit makes it the key function, anchoring the vtable/RTTI in this library so
// `dynamic_cast` works across dlopen'ed modules.
ExternalTensor::~ExternalTensor() {}
118 } // namespace backend