// SPDX-License-Identifier: Apache-2.0
/**
 * Copyright (C) 2020 Jihoon Lee <jhoon.it.lee@samsung.com>
 *
 * @file plugged_layer.h
 * @date 27 January 2021
 * @brief This file contains a wrapper for a plugged layer, INTERNAL USE ONLY
 * @see https://github.com/nnstreamer/nntrainer
 * @author Jihoon Lee <jhoon.it.lee@samsung.com>
 * @bug No known bugs except for NYI items
 */
#ifndef __PLUGGED_LAYER_H__
#define __PLUGGED_LAYER_H__

#include <layer_internal.h>

#include <nntrainer_error.h>

namespace nntrainer {
namespace internal {

/**
 * @brief PluggedLayer to wrap a layer from shared object file
 *
 */
class PluggedLayer : public nntrainer::LayerV1 {
public:
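
  /*
   * Usage sketch (illustrative, not the authoritative API): a PluggedLayer
   * wraps a LayerV1Pluggable resolved from a plugin shared object. The
   * library path and the exported symbol name below are assumptions for
   * illustration only; the real convention comes from the plugin
   * infrastructure around layer_internal.h. Requires <dlfcn.h>.
   *
   *   void *handle = dlopen("libcustom_layer.so", RTLD_LAZY | RTLD_LOCAL);
   *   auto *pluggable = static_cast<nntrainer::LayerV1Pluggable *>(
   *     dlsym(handle, "ml_train_layerv1_pluggable")); // assumed symbol name
   *   auto layer =
   *     std::make_unique<nntrainer::internal::PluggedLayer>(pluggable);
   */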

  /**
   * @brief Construct a new Plugged Layer object
   *
   * @param pluggable LayerPluggable structure from the symbol
   */
  PluggedLayer(const nntrainer::LayerV1Pluggable *pluggable) :
    /// @todo we won't need dynamic pointer cast here after api is fully
    /// implemented
    layerImpl(pluggable->createfunc()),
    destroy_func(pluggable->destroyfunc) {
    NNTR_THROW_IF(layerImpl == nullptr, std::invalid_argument)
      << "either create_func_ failed or cannot dynamic cast to layer_internal";
  }

  /**
   * @brief Destroy the Plugged Layer object
   *
   */
  ~PluggedLayer() override { destroy_func(layerImpl); }

  /**
   * @brief Move Construct Plugged Layer object
   *
   * @param rhs layer to move
   */
  PluggedLayer(PluggedLayer &&rhs) noexcept = default;

  /**
   * @brief Move assign Plugged Layer Object
   *
   * @param rhs layer to move
   * @return PluggedLayer& *this
   */
  PluggedLayer &operator=(PluggedLayer &&rhs) = default;

  /**
   * @copydoc Layer::initialize(Manager &manager)
   */
  int initialize(Manager &manager) override {
    return layerImpl->initialize(manager);
  }

  /**
   * @copydoc Layer::forwarding(bool training)
   */
  void forwarding(bool training = true) override {
    layerImpl->forwarding(training);
  }

  /**
   * @copydoc Layer::calcDerivative()
   */
  void calcDerivative() override { layerImpl->calcDerivative(); }

  /**
   * @copydoc Layer::calcGradient()
   */
  void calcGradient() override { layerImpl->calcGradient(); }

  /**
   * @copydoc Layer::applyGradient(unsigned int, std::shared_ptr<Optimizer>)
   */
  void applyGradient(unsigned int iteration,
                     std::shared_ptr<Optimizer> optimizer) override {
    layerImpl->applyGradient(iteration, std::move(optimizer));
  }

  /**
   * @copydoc Layer::read(std::ifstream &file)
   */
  void read(std::ifstream &file) override { layerImpl->read(file); }

  /**
   * @copydoc Layer::save(std::ofstream &file)
   */
  void save(std::ofstream &file) override { layerImpl->save(file); }

  /**
   * @copydoc Layer::setProperty(std::vector<std::string> values)
   */
  int setProperty(std::vector<std::string> values) override {
    return layerImpl->setProperty(std::move(values));
  }

  /**
   * @copydoc Layer::checkValidation()
   */
  int checkValidation() override { return layerImpl->checkValidation(); }

  /**
   * @copydoc Layer::getOutputDimension()
   */
  std::vector<TensorDim> getOutputDimension() override {
    return layerImpl->getOutputDimension();
  }

  /**
   * @copydoc Layer::getInputDimension()
   */
  std::vector<TensorDim> getInputDimension() override {
    return layerImpl->getInputDimension();
  }

  /**
   * @copydoc Layer::getLoss()
   */
  float getLoss() override { return layerImpl->getLoss(); }

  /**
   * @copydoc Layer::copy(std::shared_ptr<Layer> l)
   */
  void copy(std::shared_ptr<LayerV1> l) override { layerImpl->copy(l); }

  /**
   * @copydoc Layer::setTrainable(bool train)
   */
  void setTrainable(bool train) override { layerImpl->setTrainable(train); }

  /**
   * @copydoc Layer::getTrainable()
   */
  bool getTrainable() noexcept override { return layerImpl->getTrainable(); }

  /**
   * @copydoc Layer::getWeights()
   */
  std::vector<Weight> getWeights() override { return layerImpl->getWeights(); }

  /**
   * @copydoc Layer::getType()
   */
  virtual const std::string getType() const override {
    return layerImpl->getType();
  }

  /**
   * @copydoc Layer::printPreset(std::ostream &out, PrintPreset preset)
   */
  void printPreset(std::ostream &out,
                   PrintPreset preset = PrintPreset::PRINT_SUMMARY) override {
    return layerImpl->printPreset(out, preset);
  }

  /**
   * @copydoc Layer::weightAt(const unsigned int position)
   */
  Weight &weightAt(const unsigned int position) override {
    return layerImpl->weightAt(position);
  }

  /**
   * @copydoc Layer::getNumWeights()
   */
  unsigned int getNumWeights() override { return layerImpl->getNumWeights(); }

  /**
   * @copydoc Layer::setBatch(unsigned int batch)
   */
  void setBatch(unsigned int batch) override {
    return layerImpl->setBatch(batch);
  }

  /**
   * @copydoc Layer::scaleSize(float scalesize)
   */
  void scaleSize(float scalesize) noexcept override {
    return layerImpl->scaleSize(scalesize);
  }

  /**
   * @copydoc Layer::resetDimension()
   */
  void resetDimension() override { return layerImpl->resetDimension(); }

  /**
   * @copydoc Layer::getOutputs()
   */
  std::vector<Tensor> getOutputs() override { return layerImpl->getOutputs(); }

  /**
   * @copydoc Layer::getDerivatives()
   */
  std::vector<Tensor> getDerivatives() override {
    return layerImpl->getDerivatives();
  }

  /**
   * @copydoc Layer::getWeightsRef()
   */
  std::vector<Weight> &getWeightsRef() override {
    return layerImpl->getWeightsRef();
  }

  /**
   * @copydoc Layer::setInputBuffers(std::vector<std::shared_ptr<Var_Grad>>
   * inputs)
   */
  void setInputBuffers(std::vector<std::shared_ptr<Var_Grad>> inputs) override {
    return layerImpl->setInputBuffers(std::move(inputs));
  }

  /**
   * @copydoc Layer::setOutputBuffers(std::vector<std::shared_ptr<Var_Grad>>
   * outputs)
   */
  void
  setOutputBuffers(std::vector<std::shared_ptr<Var_Grad>> outputs) override {
    return layerImpl->setOutputBuffers(std::move(outputs));
  }

  /**
   * @copydoc Layer::getNumInputs()
   */
  unsigned int getNumInputs() override { return layerImpl->getNumInputs(); }

  /**
   * @copydoc Layer::getNumOutputs()
   */
  unsigned int getNumOutputs() override { return layerImpl->getNumOutputs(); }

private:
  /// @todo: migrate to ml::train::Layer
  // ml::train::Layer *layerImpl;
  nntrainer::LayerV1 *layerImpl;
  nntrainer::DestroyLayerV1Func destroy_func;
};

} // namespace internal
} // namespace nntrainer

#endif // __PLUGGED_LAYER_H__