This PR resolves the following Coverity issues: an integer overflow, a use of `auto` that causes an unnecessary copy, a missing lock, and a thread lock (a mutex held while calling into code that takes another lock).
**Self-evaluation:**
1. Build test: [X] Passed [ ] Failed [ ] Skipped
2. Run test: [X] Passed [ ] Failed [ ] Skipped
Signed-off-by: Donghyeon Jeong <dhyeon.jeong@samsung.com>
}
void IterationQueue::MarkableIteration::markSampleFilled() {
- std::scoped_lock notify_lock_guard(notify_mutex);
+ std::unique_lock notify_lock_guard(notify_mutex);
num_observed++;
if (num_observed == iteration.batch()) {
- iq->markFilled(this);
num_observed = 0;
+ notify_lock_guard.unlock();
+ iq->markFilled(this);
+ notify_lock_guard.lock();
}
}
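
For context, the hunk above switches to `std::unique_lock` so the mutex can be released before calling `iq->markFilled(this)`, which may take its own lock. Below is a minimal sketch of that deadlock-avoidance pattern; `Producer` and `Consumer` are hypothetical stand-ins for `MarkableIteration` and the iteration queue:

```cpp
#include <mutex>

struct Consumer {
  std::mutex m;
  void notify() {
    std::scoped_lock lg(m); // the callee takes its own lock
  }
};

struct Producer {
  std::mutex m;
  int observed = 0;
  int batch = 4;
  Consumer *consumer = nullptr;

  void onSample() {
    std::unique_lock lg(m);
    if (++observed == batch) {
      observed = 0;
      lg.unlock();        // release our mutex first ...
      consumer->notify(); // ... so Consumer::m is never nested under it
      lg.lock();          // reacquire, mirroring the change above
    }
  }
};

int main() {
  Consumer c;
  Producer p;
  p.consumer = &c;
  for (int i = 0; i < p.batch; ++i)
    p.onSample();
  return 0;
}
```
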
std::vector<Tensor> &labels) {
NNTR_THROW_IF(idx >= sz, std::range_error)
<< "given index is out of bound, index: " << idx << " size: " << sz;
- std::streamoff offset = static_cast<std::streamoff>(
- idx * sample_size * RawFileDataProducer::pixel_size);
+ std::streamoff offset = static_cast<std::streamoff>(idx) *
+ static_cast<std::streamoff>(sample_size) *
+ RawFileDataProducer::pixel_size;
file.seekg(offset, std::ios_base::beg);
for (auto &input : inputs) {
input.read(file);
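
The hunk above fixes the overflow: the old code multiplied three narrow integers first and only then widened the result, so the product could wrap before the cast. A minimal sketch of the difference, assuming a 32-bit `unsigned int` and a 64-bit `std::streamoff`; the sizes are hypothetical:

```cpp
#include <cassert>
#include <ios>

int main() {
  unsigned int idx = 70000, sample_size = 70000; // hypothetical values
  const unsigned int pixel_size = 4;

  // Old form: the product wraps in 32-bit unsigned arithmetic, then the
  // already-truncated value is widened.
  std::streamoff bad =
    static_cast<std::streamoff>(idx * sample_size * pixel_size);

  // Fixed form: widen first so the multiplication runs in 64 bits.
  std::streamoff good = static_cast<std::streamoff>(idx) *
                        static_cast<std::streamoff>(sample_size) *
                        pixel_size;

  assert(bad != good); // good is 19'600'000'000; bad wrapped modulo 2^32
  return 0;
}
```
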
std::map<std::string, std::vector<unsigned int>> exec_orders;
for (auto &spec : out_specs) {
- const auto name = lnode->getName() + ":" + spec.variable_spec.name;
+ const auto &name = lnode->getName() + ":" + spec.variable_spec.name;
auto orders = tensor_manager->getTensorExecutionOrders(name, false);
exec_orders.insert({name, orders});
try {
}
for (auto &spec : weight_specs) {
- const auto name = std::get<const std::string>(spec);
+ const auto &name = std::get<const std::string>(spec);
auto orders = tensor_manager->getTensorExecutionOrders(name, true);
exec_orders.insert({name, orders});
try {
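
Both hunks above address Coverity's copy-of-`auto` warning (AUTO_CAUSES_COPY). `std::get` returns a reference, so a plain `const auto` variable deep-copies the string, while `const auto &` binds to the element in place. A minimal illustration with a hypothetical spec tuple:

```cpp
#include <string>
#include <tuple>

int main() {
  std::tuple<const std::string, int> spec{"layer0:weight0", 0};

  const auto copy = std::get<const std::string>(spec); // copies the string
  const auto &ref = std::get<const std::string>(spec); // binds, no copy

  // Note: when the initializer is a temporary (e.g. a + b, as in the
  // first hunk), const auto& simply extends the temporary's lifetime,
  // so neither form copies there.
  (void)copy;
  (void)ref;
  return 0;
}
```
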
*
*/
explicit CacheElem(std::shared_ptr<SwapDevice> dev, unsigned int mem_id,
- size_t off, size_t len,
- std::shared_ptr<MemoryData> data,
+ size_t off, size_t len, std::shared_ptr<MemoryData> data,
CachePolicy pol = CachePolicy::ALWAYS_SYNCED) :
initial_opt(Options::FIRST_ACCESS),
device(dev),
*
* @return active status
*/
- bool isActive() const { return active; }
+ bool isActive() const {
+ std::scoped_lock lg(device_mutex);
+ return active;
+ }
/**
* @brief get length of cache element
void reset() { initial_opt = Options::FIRST_ACCESS; }
private:
- Options initial_opt; /**< accessed */
- std::mutex device_mutex; /**< protect device */
- std::shared_ptr<SwapDevice> device; /**< swap device */
- bool active; /**< element is loaded */
- unsigned int id; /**< memory id */
- size_t offset; /**< element offset from swap device */
- size_t length; /**< element size */
- CachePolicy policy; /**< cache policy */
+ Options initial_opt; /**< accessed */
+ mutable std::mutex device_mutex; /**< protect device */
+ std::shared_ptr<SwapDevice> device; /**< swap device */
+ bool active; /**< element is loaded */
+ unsigned int id; /**< memory id */
+ size_t offset; /**< element offset from swap device */
+ size_t length; /**< element size */
+ CachePolicy policy; /**< cache policy */
std::shared_ptr<MemoryData> mem_data; /**< allocated memory data */
};
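
The `isActive()` hunk adds the missing lock, and making `device_mutex` `mutable` is what allows a `const` accessor to lock it. A minimal sketch of the pattern; `Elem` and `setActive` are hypothetical stand-ins:

```cpp
#include <mutex>

class Elem {
public:
  bool isActive() const {
    std::scoped_lock lg(m); // legal only because m is mutable
    return active;
  }
  void setActive(bool a) {
    std::scoped_lock lg(m);
    active = a;
  }

private:
  mutable std::mutex m; // mutable: lockable from const member functions
  bool active = false;
};
```
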
mkModelTc_V2(makeNonTrainableFcIdx3, "non_trainable_fc_idx3",
ModelTestOption::ALL_V2),
}),
- [](const testing::TestParamInfo<nntrainerModelTest::ParamType> &info) {
- return std::get<1>(info.param);
- });
+ [](const testing::TestParamInfo<nntrainerModelTest::ParamType> &info)
+ -> const auto & { return std::get<1>(info.param); });
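
The name-generator hunk is the same copy fix in lambda form: without the trailing `-> const auto &`, the deduced return type is by value, so each invocation copies the name string out of the test parameter. A small sketch with a hypothetical parameter tuple:

```cpp
#include <string>
#include <tuple>

int main() {
  std::tuple<int, std::string> param{0, "non_trainable_fc_idx3"};

  auto by_value = [](const auto &p) { return std::get<1>(p); }; // copies
  auto by_ref = [](const auto &p) -> const auto & {
    return std::get<1>(p); // returns a reference to the tuple element
  };

  const std::string &name = by_ref(param); // no copy; param outlives name
  (void)by_value(param);
  (void)name;
  return 0;
}
```
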
#ifdef NDK_BUILD