This patch removes InitLayerContext::getOutputDimensions().
This function is used only in tests, so it is removed.
(It is also used in network_graph, but that usage will soon be substituted.)
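For reference, a minimal sketch of the replacement pattern, assuming the usual nntrainer headers are available; the helper name `collectOutputDims` is illustrative only and not part of this patch. Callers that previously relied on `getOutputDimensions()` can rebuild the same dimension list from `getOutSpecs()`:

```cpp
// Illustrative sketch, not part of this patch: the equivalent of the
// removed getOutputDimensions(), expressed in terms of getOutSpecs().
std::vector<TensorDim> collectOutputDims(const InitLayerContext &ctx) {
  std::vector<TensorDim> out_dims;
  out_dims.reserve(ctx.getOutSpecs().size());
  for (auto &spec : ctx.getOutSpecs())
    out_dims.push_back(spec.variable_spec.dim);
  return out_dims;
}
```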
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
* @todo Support multi-input graph.
*/
+#include "tensor.h"
#include <cmath>
#include <stdexcept>
#include <string>
* allocated input. This is necessary for the manager to know when this output
* node is going to be used with in-place optimizations.
*/
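+ // collect the output dimensions from the out specs (replaces the removed getOutputDimensions())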
+ std::vector<TensorDim> out_dims;
+ for (auto &spec : init_context.getOutSpecs()) {
+ out_dims.push_back(spec.variable_spec.dim);
+ }
unsigned int max_fwd_exec_order = graph.size();
const std::vector<Var_Grad *> &outputs = tensor_manager->requestOutputs(
- gnode, init_context.getOutputDimensions(), inputs_name, max_fwd_exec_order,
- shared_var, shared_grad);
+ gnode, out_dims, inputs_name, max_fwd_exec_order, shared_var, shared_grad);
/** create shared weight names if requested */
std::vector<std::string> shared_weight_names;
}
}
-const std::vector<VarGradSpecV2> &InitLayerContext::getOutSpecs() {
+const std::vector<VarGradSpecV2> &InitLayerContext::getOutSpecs() const {
return output_specs;
}
}
/**
- * @brief Get the Output Dimensions object
- *
- * @return std::vector<TensorDim>& Output dimensions
- */
- const std::vector<TensorDim> getOutputDimensions() const {
- std::vector<TensorDim> output_dim;
- for (auto &spec : output_specs) {
- output_dim.push_back(spec.variable_spec.dim);
- }
- return output_dim;
- }
-
- /**
* @brief Get the Out Specs object
*
* @return std::vector<VarGradSpecV2> out specification
*/
- const std::vector<VarGradSpecV2> &getOutSpecs();
+ const std::vector<VarGradSpecV2> &getOutSpecs() const;
/**
* @brief Validate the context
if (!must_fail) {
nntrainer::InitLayerContext init_context = lnode->finalize();
- for (auto const &dim : init_context.getOutputDimensions())
- EXPECT_GT(dim.getDataLen(), size_t(0));
+ for (auto const &spec : init_context.getOutSpecs())
+ EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0));
for (auto const &ws : init_context.getWeightsSpec())
EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0));
for (auto const &ts : init_context.getTensorsSpec())
* @bug No known bugs except for NYI items
*/
#include <layers_common_tests.h>
+#include <tensor_wrap_specs.h>
#include <fstream>
#include <type_traits>
return vg;
};
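+ // like allocate_inouts above, but driven by VarGradSpecV2 (used for the layer outputs)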
+ auto allocate_tensors_v2 = [](const std::vector<VarGradSpecV2> &specs) {
+ std::vector<Var_Grad> vg;
+ vg.reserve(specs.size());
+
+ for (auto &spec : specs) {
+ /// @todo the initializer should also depend on the spec
+ vg.emplace_back(spec.variable_spec.dim, Tensor::Initializer::NONE, true,
+ true, "golden");
+ }
+ return vg;
+ };
+
auto allocate_weights = [&file](const auto &specs) {
std::vector<Weight> weights;
weights.reserve(specs.size());
return {
allocate_weights(context.getWeightsSpec()),
allocate_inouts(context.getInputDimensions()),
- allocate_inouts(context.getOutputDimensions()),
+ allocate_tensors_v2(context.getOutSpecs()),
allocate_tensors(context.getTensorsSpec()),
};
}
if (!must_fail) {
EXPECT_NO_THROW(layer->finalize(init_context));
- for (auto const &dim : init_context.getOutputDimensions())
- EXPECT_GT(dim.getDataLen(), size_t(0));
+ for (auto const &spec : init_context.getOutSpecs())
+ EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0));
for (auto const &ws : init_context.getWeightsSpec())
EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0));
for (auto const &ts : init_context.getTensorsSpec())