void NNTrainer::InputTensorsInfo::getSample(float **input, float **label,
bool *last) {
ml_logd("<called>");
- ml_logd("(pop/push: %d/%d)", pop_count, push_count);
pid_t pid = getpid();
pid_t tid = syscall(SYS_gettid);
ml_logd("pid[%d], tid[%d]", pid, tid);
std::unique_lock<std::mutex> lock(queue_lock);
+ ml_logd("(pop/push: %d/%d)", pop_count, push_count);
data_empty.wait(lock, [this] { return !isQueueEmpty(); });
ml_logd("getSample condition is met");
std::vector<props::InputIsSequence>(), props::DynamicTimeSequence(false))) {
auto left = loadProperties(properties, *recurrent_props);
- std::transform(input_conns.begin(), input_conns.end(),
- std::inserter(this->input_layers, this->input_layers.begin()),
- [](const Connection &c) { return c.getName(); });
+ std::transform(
+ input_conns.begin(), input_conns.end(),
+ std::inserter(this->input_layers, this->input_layers.begin()),
+ [](const Connection &c) -> const auto & { return c.getName(); });
/// build end info.
/// eg)
/// discard index information as it is not needed as it is not really needed
this->start_layers.reserve(start_layers.size());
- std::transform(start_layers.begin(), start_layers.end(),
- std::back_inserter(this->start_layers),
- [](const Connection &c) { return c.getName(); });
+ std::transform(
+ start_layers.begin(), start_layers.end(),
+ std::back_inserter(this->start_layers),
+ [](const Connection &c) -> const auto & { return c.getName(); });
- std::transform(end_layers.begin(), end_layers.end(),
- std::inserter(this->end_layers, this->end_layers.begin()),
- [](const Connection &c) { return c.getName(); });
+ std::transform(
+ end_layers.begin(), end_layers.end(),
+ std::inserter(this->end_layers, this->end_layers.begin()),
+ [](const Connection &c) -> const auto & { return c.getName(); });
}
/// Default destructor: SliceRealizer owns no resources needing manual cleanup.
SliceRealizer::~SliceRealizer() = default;