}
};
}
+
+/**
+ * @brief Finalize and return the sample-wise generator backed by the
+ *        user-supplied C-style callback.
+ *
+ * @param input_dims dimensions of the input tensors the generator fills
+ * @param label_dims dimensions of the label tensors the generator fills
+ * @return DataProducer::Generator_sample callable producing one sample
+ *         per invocation; returns true when the callback signals the
+ *         last sample
+ * @throws std::invalid_argument if no callback was registered, or (from
+ *         the returned generator) if the callback reports an error
+ */
+DataProducer::Generator_sample
+FuncDataProducer::finalize_sample(const std::vector<TensorDim> &input_dims,
+                                  const std::vector<TensorDim> &label_dims) {
+  NNTR_THROW_IF(!this->cb, std::invalid_argument)
+    << "given callback is nullptr!";
+
+  /// Scratch arrays of raw data pointers handed to the C-style callback.
+  /// Held in shared_ptr with an array deleter so that every copy of the
+  /// returned lambda shares -- and keeps alive -- the same buffers.
+  auto input_data = std::shared_ptr<float *>(new float *[input_dims.size()],
+                                             std::default_delete<float *[]>());
+  auto label_data = std::shared_ptr<float *>(new float *[label_dims.size()],
+                                             std::default_delete<float *[]>());
+
+  return [cb = this->cb, ud = this->user_data_prop->get(), input_data,
+          label_data](unsigned int idx, std::vector<Tensor *> &inputs,
+                      std::vector<Tensor *> &labels) -> bool {
+    float **input_data_raw = input_data.get();
+    float **label_data_raw = label_data.get();
+
+    /// Expose each tensor's raw buffer through the pointer arrays.
+    /// NOTE(review): assumes inputs.size() <= input_dims.size() and
+    /// labels.size() <= label_dims.size() -- confirm callers guarantee this,
+    /// otherwise the writes below overrun the scratch arrays.
+    for (unsigned int i = 0; i < inputs.size(); ++i) {
+      *(input_data_raw + i) = inputs[i]->getData();
+    }
+
+    for (unsigned int i = 0; i < labels.size(); ++i) {
+      *(label_data_raw + i) = labels[i]->getData();
+    }
+
+    /// `idx` is intentionally unused: the callback itself decides which
+    /// sample to produce and signals exhaustion through `last`.
+    bool last = false;
+    int status = cb(input_data_raw, label_data_raw, &last, ud);
+    NNTR_THROW_IF(status != ML_ERROR_NONE, std::invalid_argument)
+      << "[DataProducer] Callback returned error: " << status << '\n';
+
+    return last;
+  };
+}
} // namespace nntrainer
* @copydoc DataProducer::setProeprty(const std::vector<std::string>
* &properties)
*/
- virtual void setProperty(const std::vector<std::string> &properties) override;
+ void setProperty(const std::vector<std::string> &properties) override;
/**
* @copydoc DataProducer::finalize(const std::vector<TensorDim>, const
* std::vector<TensorDim>)
*/
- virtual DataProducer::Generator
+ DataProducer::Generator
finalize(const std::vector<TensorDim> &input_dims,
const std::vector<TensorDim> &label_dims) override;
+ /**
+ * @copydoc DataProducer::finalize_sample(const std::vector<TensorDim>, const
+ * std::vector<TensorDim>)
+ */
+ DataProducer::Generator_sample
+ finalize_sample(const std::vector<TensorDim> &input_dims,
+ const std::vector<TensorDim> &label_dims) override;
+
private:
datagen_cb cb;
std::unique_ptr<PropsUserData> user_data_prop;
output_.reshape(output_reshape_helper);
for (unsigned int batch = 0; batch < input_.batch(); batch++) {
- const Tensor source_tensor = Tensor::Map(
- input_.getAddress(batch, 0, idx, 0), input_reshape_helper.width(),
- {1, 1, 1, input_reshape_helper.width()});
- Tensor dest_tensor = Tensor::Map(
- output_.getAddress(batch, 0, 0, 0), output_reshape_helper.width(),
- {1, 1, 1, output_reshape_helper.width()});
+ const Tensor source_tensor =
+ Tensor::Map(input_.getAddress(batch, 0, idx, 0),
+ input_reshape_helper.width() * sizeof(float),
+ {1, 1, 1, input_reshape_helper.width()});
+ Tensor dest_tensor =
+ Tensor::Map(output_.getAddress(batch, 0, 0, 0),
+ output_reshape_helper.width() * sizeof(float),
+ {1, 1, 1, output_reshape_helper.width()});
dest_tensor.copy(source_tensor);
}
output_.reshape(output_reshape_helper);
for (unsigned int batch = 0; batch < input_.batch(); batch++) {
- Tensor dest_tensor = Tensor::Map(input_.getAddress(batch, 0, idx, 0),
- input_reshape_helper.width(),
- {1, 1, 1, input_reshape_helper.width()});
- const Tensor source_tensor = Tensor::Map(
- output_.getAddress(batch, 0, 0, 0), output_reshape_helper.width(),
- {1, 1, 1, output_reshape_helper.width()});
+ Tensor dest_tensor =
+ Tensor::Map(input_.getAddress(batch, 0, idx, 0),
+ input_reshape_helper.width() * sizeof(float),
+ {1, 1, 1, input_reshape_helper.width()});
+ const Tensor source_tensor =
+ Tensor::Map(output_.getAddress(batch, 0, 0, 0),
+ output_reshape_helper.width() * sizeof(float),
+ {1, 1, 1, output_reshape_helper.width()});
dest_tensor.copy(source_tensor);
}
}
}
-Tensor Tensor::Map(float *buf, unsigned int size, const TensorDim &d,
+Tensor Tensor::Map(float *buf, unsigned int bytes, const TensorDim &d,
int offset) {
if (d.getDataLen() == 0 || buf == nullptr) {
throw std::invalid_argument(
"[Tensor::Map] empty tensor dim is not allowed");
}
- if (d.getDataLen() + offset > size) {
+ if (d.getDataLen() * sizeof(float) + offset > bytes) {
throw std::invalid_argument(
"Creating shared tensor of size bigger than tensor memory.");
}
"[Tensor::Map] empty tensor dim is not allowed");
}
- if (d.getDataLen() + offset > size) {
+ if (d.getDataLen() * sizeof(float) + offset > size) {
throw std::invalid_argument(
"Creating shared tensor of size bigger than tensor memory.");
}
Tensor Tensor::getSharedDataTensor(const TensorDim dim_, unsigned int offset,
bool reset_stride) const {
Tensor ret = *this;
+ ret.dim = dim_;
- if (dim_.getDataLen() + offset > dim.getDataLen())
+ if (ret.bytes() + offset > bytes())
throw std::invalid_argument(
"Creating shared tensor of size bigger than tensor memory.");
- ret.dim = dim_;
if (reset_stride)
ret.strides = ret.dim.computeStrides();
* This will not copy buffer to a new tensor but directly uses it
*
* @param buf buffer
- * @param size buffer size in bytes
+ * @param bytes buffer size in bytes
* @param d tensor dim
* @param offset offset to be used from current
* @return Tensor object
* @throws std::invalid_argument if buf is null
*/
- static Tensor Map(float *buf, unsigned int size, const TensorDim &d,
+ static Tensor Map(float *buf, unsigned int bytes, const TensorDim &d,
int offset = 0);
/**
* updateBatch and then allocate again to avoid such issues.
*/
   void updateBatch(unsigned int batch) {
+    // No-op when the requested batch equals the current one: avoids the
+    // reallocate() below being triggered for already-allocated tensors on
+    // an unchanged batch size.
+    if (dim.batch() == batch) {
+      return;
+    }
     dim.batch(batch);
     if (isAllocated())
       reallocate();
generator(0, std::get<0>(sample_data), std::get<1>(sample_data)));
} else {
EXPECT_NO_THROW(
- generator(0, std::get<1>(sample_data), std::get<1>(sample_data)));
+ generator(0, std::get<0>(sample_data), std::get<1>(sample_data)));
}
}
for (unsigned i = 0; i < sz; ++i) {
auto last = generator(i, input_view, label_view);
- if (i == sz - 1) {
+ if (i == sz - 1 && has_fixed_size) {
EXPECT_TRUE(last);
} else {
ASSERT_FALSE(last) << " reached last at iteration: " << i << '\n';
#include <tensor.h>
 namespace {
+// Shapes are per-sample (batch dimension fixed to 1): this file now
+// exercises the sample-wise producer path instead of the batch-wise one.
-std::vector<nntrainer::TensorDim> input_shapes = {{3, 2, 4, 5}, {3, 2, 3, 4}};
-std::vector<nntrainer::TensorDim> label_shapes = {{3, 1, 1, 10}, {3, 1, 1, 2}};
+std::vector<nntrainer::TensorDim> input_shapes = {{1, 2, 4, 5}, {1, 2, 3, 4}};
+std::vector<nntrainer::TensorDim> label_shapes = {{1, 1, 1, 10}, {1, 1, 1, 2}};
 int user_data = 0;
+// Sample-producing callback: fills the first input/label pair with zeros and
+// the second with ones, increments *user_data as a delivery check, and never
+// sets *last (the producer side decides when to stop). Returns 0 on success.
-int getBatch(float **outVec, float **outLabel, bool *last, void *user_data) {
+int getSample(float **outVec, float **outLabel, bool *last, void *user_data) {
   /** test user data is given correctly */
   int *ud = reinterpret_cast<int *>(user_data);
   *ud += 1;
   /** first input/label is all zero, second input/label is all one */
+  /** Tensor::Map now takes the buffer size in bytes, hence * sizeof(float) */
   auto first_input = nntrainer::Tensor::Map(
-    *outVec, input_shapes[0].getDataLen(), input_shapes[0]);
+    *outVec, input_shapes[0].getDataLen() * sizeof(float), input_shapes[0]);
   first_input.setValue(0);
   auto second_input = nntrainer::Tensor::Map(
-    *(outVec + 1), input_shapes[1].getDataLen(), input_shapes[1]);
+    *(outVec + 1), input_shapes[1].getDataLen() * sizeof(float),
+    input_shapes[1]);
   second_input.setValue(1);
   auto first_label = nntrainer::Tensor::Map(
-    *outLabel, label_shapes[0].getDataLen(), label_shapes[0]);
+    *outLabel, label_shapes[0].getDataLen() * sizeof(float), label_shapes[0]);
   first_label.setValue(0);
   auto second_label = nntrainer::Tensor::Map(
-    *(outLabel + 1), label_shapes[1].getDataLen(), label_shapes[1]);
+    *(outLabel + 1), label_shapes[1].getDataLen() * sizeof(float),
+    label_shapes[1]);
   second_label.setValue(1);
   *last = false;
   return 0;
 };
+// Callback that always fails; used to verify the generator propagates the
+// error status as an exception.
-int getBatch_error(float **outVec, float **outLabel, bool *last,
-                   void *user_data) {
+int getSample_error(float **outVec, float **outLabel, bool *last,
+                    void *user_data) {
   return -1;
 }
 } // namespace
+// Factory: producer wrapping the well-behaved sample callback above.
 std::unique_ptr<nntrainer::DataProducer>
-createConstantBatchProducer(const std::vector<std::string> &properties = {}) {
+createConstantSampleProducer(const std::vector<std::string> &properties = {}) {
   std::unique_ptr<nntrainer::DataProducer> ptr =
-    std::make_unique<nntrainer::FuncDataProducer>(getBatch, &user_data);
+    std::make_unique<nntrainer::FuncDataProducer>(getSample, &user_data);
   return ptr;
 }
+// Factory: producer whose callback always returns an error, to exercise the
+// FAIL_AT_GENERATOR_CALL path.
 std::unique_ptr<nntrainer::DataProducer>
-createErrorBatchProducer(const std::vector<std::string> &properties = {}) {
+createErrorSampleProducer(const std::vector<std::string> &properties = {}) {
   std::unique_ptr<nntrainer::DataProducer> ptr =
-    std::make_unique<nntrainer::FuncDataProducer>(getBatch_error, nullptr);
+    std::make_unique<nntrainer::FuncDataProducer>(getSample_error, nullptr);
   return ptr;
 }
+// Factory: producer constructed with a null callback; finalize is expected
+// to throw (FAIL_AT_FINALIZE).
 std::unique_ptr<nntrainer::DataProducer>
-createNullBatchProducer(const std::vector<std::string> &properties = {}) {
+createNullSampleProducer(const std::vector<std::string> &properties = {}) {
   std::unique_ptr<nntrainer::DataProducer> ptr =
     std::make_unique<nntrainer::FuncDataProducer>(nullptr, nullptr);
   return ptr;
 }
+// Expected outcomes: success end-to-end, failure when the generator runs,
+// and failure already at finalize (null callback).
 auto func_success = DataProducerSemanticsParamType(
-  createConstantBatchProducer, {}, input_shapes, label_shapes, validate,
+  createConstantSampleProducer, {}, input_shapes, label_shapes, validate,
   DataProducerSemanticsExpectedResult::SUCCESS);
 auto func_error = DataProducerSemanticsParamType(
-  createErrorBatchProducer, {}, input_shapes, label_shapes, nullptr,
+  createErrorSampleProducer, {}, input_shapes, label_shapes, nullptr,
   DataProducerSemanticsExpectedResult::FAIL_AT_GENERATOR_CALL);
 auto func_nullptr = DataProducerSemanticsParamType(
-  createNullBatchProducer, {}, input_shapes, label_shapes, nullptr,
+  createNullSampleProducer, {}, input_shapes, label_shapes, nullptr,
   DataProducerSemanticsExpectedResult::FAIL_AT_FINALIZE);
 INSTANTIATE_TEST_CASE_P(Func, DataProducerSemantics,
                         ::testing::Values(func_success, func_error,
                                           func_nullptr));
+
+// Run the same semantic checks through the sample-wise generator interface
+// added by finalize_sample().
+INSTANTIATE_TEST_CASE_P(Func, DataProducerSemantics_samples,
+                        ::testing::Values(func_success, func_error,
+                                          func_nullptr));
EXPECT_EQ(d.width(), 7u);
}
-TEST(nntrainer_Tensor, TensorWrap_p) {
+TEST(nntrainer_Tensor, TensorMap_p) {
float dat[] = {1, 2, 3};
{
- nntrainer::Tensor a = nntrainer::Tensor::Map(dat, 3, {3});
+ nntrainer::Tensor a = nntrainer::Tensor::Map(dat, 3 * sizeof(float), {3});
/// check if a.getData() has same address with dat
EXPECT_EQ(dat, a.getData());
{