/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_EXEC_FEATURE_NHWC_VIEW_H__
18 #define __ONERT_EXEC_FEATURE_NHWC_VIEW_H__
20 #include "../Reader.h"
25 #include "backend/ITensor.h"
27 #include "util/Utils.h"
38 template <typename T> class View final : public feature::Reader<T>
41 // Construct for buffer of model inputs
42 View(const ir::FeatureShape &shape, T *ptr, size_t len)
43 : _shape{shape}, _ptr{reinterpret_cast<uint8_t *>(ptr)}, _len{len}
45 UNUSED_RELEASE(len); // Workaround for unused variable in release mode
46 assert(shape.N * shape.H * shape.W * shape.C * sizeof(T) == len);
49 _strides.C = sizeof(T);
50 _strides.W = shape.C * sizeof(T);
51 _strides.H = shape.C * shape.W * sizeof(T);
52 _strides.N = shape.C * shape.W * shape.H * sizeof(T);
55 // Construct for backend tensor
56 View(backend::ITensor *tensor)
57 : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
59 assert(tensor->layout() == ir::Layout::NHWC);
61 const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
62 _strides.C = tensor->dimension(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
63 _strides.W = tensor->dimension(2) == 1 ? 0 : tensor->calcOffset({0, 0, 1, 0}) - start_offset;
64 _strides.H = tensor->dimension(1) == 1 ? 0 : tensor->calcOffset({0, 1, 0, 0}) - start_offset;
65 _strides.N = tensor->dimension(0) == 1 ? 0 : tensor->calcOffset({1, 0, 0, 0}) - start_offset;
67 _shape.C = tensor->dimension(3);
68 _shape.W = tensor->dimension(2);
69 _shape.H = tensor->dimension(1);
70 _shape.N = tensor->dimension(0);
74 T at(uint32_t row, uint32_t col, uint32_t ch) const override
76 const auto offset = feature_index_to_byte_offset(0, row, col, ch);
78 const T *ptr = reinterpret_cast<const T *>(_ptr + offset);
82 T at(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch) const override
84 const auto offset = feature_index_to_byte_offset(batch, row, col, ch);
86 const T *ptr = reinterpret_cast<const T *>(_ptr + offset);
91 T &at(uint32_t row, uint32_t col, uint32_t ch)
93 const auto offset = feature_index_to_byte_offset(0, row, col, ch);
95 T *ptr = reinterpret_cast<T *>(_ptr + offset);
100 T &at(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch)
102 const auto offset = feature_index_to_byte_offset(batch, row, col, ch);
104 T *ptr = reinterpret_cast<T *>(_ptr + offset);
110 size_t feature_index_to_byte_offset(uint32_t batch, uint32_t row, uint32_t col, uint32_t ch) const
112 assert(1u * _shape.N > batch); // shape.N > batch
113 assert(1u * _shape.H > row); // shape.H > row
114 assert(1u * _shape.W > col); // shape.W > col
115 assert(1u * _shape.C > ch); // shape.C > ch
118 res += batch * _strides.N;
119 res += row * _strides.H;
120 res += col * _strides.W;
121 res += ch * _strides.C;
127 // TODO Remove _shape
128 ir::FeatureShape _shape;
129 using Strides = ir::FeatureShape;
136 } // namespace feature
140 #endif // __ONERT_EXEC_FEATURE_NHWC_VIEW_H__