/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ONERT_EXEC_FEATURE_NCHW_READER_H__
#define __ONERT_EXEC_FEATURE_NCHW_READER_H__
#include "../Reader.h"

#include "backend/ITensor.h"

#include <cassert>
#include <cstddef>
#include <cstdint>
36 template <typename T> class Reader final : public feature::Reader<T>
39 // Construct for buffer of model inputs
40 Reader(const ir::FeatureShape &shape, const T *ptr, size_t len)
41 : _shape{shape}, _ptr{reinterpret_cast<const uint8_t *>(ptr)}, _len{len}
43 assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
46 _strides.W = sizeof(T);
47 _strides.H = shape.W * sizeof(T);
48 _strides.C = shape.W * shape.H * sizeof(T);
49 _strides.N = shape.W * shape.H * shape.C * sizeof(T);
52 // Construct for backend tensor
53 Reader(backend::ITensor *tensor)
54 : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
56 assert(tensor->layout() == ir::Layout::NCHW);
58 const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
59 _strides.W = tensor->dimension(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
60 _strides.H = tensor->dimension(2) == 1 ? 0 : tensor->calcOffset({0, 0, 1, 0}) - start_offset;
61 _strides.C = tensor->dimension(1) == 1 ? 0 : tensor->calcOffset({0, 1, 0, 0}) - start_offset;
62 _strides.N = tensor->dimension(0) == 1 ? 0 : tensor->calcOffset({1, 0, 0, 0}) - start_offset;
64 _shape.W = tensor->dimension(3);
65 _shape.H = tensor->dimension(2);
66 _shape.C = tensor->dimension(1);
67 _shape.N = tensor->dimension(0);
71 T at(uint32_t ch, uint32_t row, uint32_t col) const override
73 const auto offset = feature_index_to_byte_offset(0, ch, row, col);
75 const T *ptr = reinterpret_cast<const T *>(_ptr + offset);
79 T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
81 const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
83 const T *ptr = reinterpret_cast<const T *>(_ptr + offset);
89 size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
91 assert(1u * _shape.N > batch); // shape.N > batch
92 assert(1u * _shape.C > ch); // shape.C > ch
93 assert(1u * _shape.H > row); // shape.H > row
94 assert(1u * _shape.W > col); // shape.W > col
97 res += batch * _strides.N;
98 res += ch * _strides.C;
99 res += row * _strides.H;
100 res += col * _strides.W;
106 // TODO Remove _shape
107 ir::FeatureShape _shape;
108 using Strides = ir::FeatureShape;
} // namespace feature

#endif // __ONERT_EXEC_FEATURE_NCHW_READER_H__