void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out);
void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out);
+ /*
+ * ########## IMPORTANT ##########
+ * Before changing these lines, see the comment above pipeline_manager_.
+ */
TensorsInfoManager tensors_info_manager_;
TensorsDataManager tensors_data_manager_;
// Common ML API end
// Single API end
// Pipeline API begin
+ /*
+ * ########## IMPORTANT ##########
+ * Ensure that the pipeline_manager_ field appears AFTER tensors_info_manager_
+ * and tensors_data_manager_. This means that when ~MlInstance() is called,
+ * the pipeline_manager_ is destroyed BEFORE the managers. This should
+ * prevent the risk of race conditions, which occur in
+ * the following scenario:
+ * 1. SinkListener or CustomFilter are registered for a pipeline. Note that
+ * they are triggered by callbacks, which run in threads other than the main one.
+ * 2. Pipeline is running and callbacks called in different threads
+ * use Tensor{Info, Data} objects, managed by tensor_{info, data}_manager_s.
+ * 3. The application exits or is reloaded.
+ *
+ * If the managers were destroyed before the pipeline to which the
+ * SinkListener/CustomFilter belongs, the callbacks (running in non-main
+ * threads) could still hold pointers to freed Tensors{Info, Data}.
+ *
+ * When pipeline_manager_ is destroyed before
+ * tensors_{info, data}_manager_s, the Pipeline is destroyed and its
+ * SinkListeners/CustomFilters are unregistered when Tensors{Info, Data} are
+ * still valid.
+ */
PipelineManager pipeline_manager_;
// PipelineManager::createPipeline() begin
// Destroys the manager and releases every TensorsData object it owns.
TensorsDataManager::~TensorsDataManager() {
ScopeLogger();
+
+ std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
// Hold the lock while clearing: per the ordering comment above
// pipeline_manager_, pipeline callbacks running in non-main threads may
// still be calling into this manager while it is being torn down.
map_.clear();
};
return nullptr;
}
+ std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
int id = nextId_++;
auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
map_[id] = std::move(t);
TensorsData* TensorsDataManager::GetTensorsData(int id) {
ScopeLogger("id: %d", id);
+ std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
if (map_.end() != map_.find(id)) {
return map_[id].get();
}
return PlatformResult(ErrorCode::ABORT_ERR);
}
+ std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
map_.erase(t->Id());
return PlatformResult(ErrorCode::NO_ERROR);
#ifndef __ML_TENSORS_DATA_MANAGER_H__
#define __ML_TENSORS_DATA_MANAGER_H__
+#include <mutex>
#include <unordered_map>
#include "common/logger.h"
TensorsDataManager(TensorsDataManager const&) = delete;
TensorsDataManager& operator=(TensorsDataManager const&) = delete;
+ /*
+ * For performance reasons and simplicity we use a single mutex
+ * to lock map_ and nextId_. They are often used together
+ * and we'd have to lock all of them anyway.
+ */
+ std::mutex map_and_next_id_mutex_;
std::unordered_map<int, std::unique_ptr<TensorsData>> map_;
int nextId_;
};
// Destroys the manager and releases every TensorsInfo object it owns
// (shared between the by-id and by-handle maps).
TensorsInfoManager::~TensorsInfoManager() {
ScopeLogger();
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
// Hold the lock while clearing both maps: per the ordering comment above
// pipeline_manager_, callbacks in non-main threads may still be performing
// lookups here during teardown.
map_by_id_.clear();
map_by_handle_.clear();
};
return nullptr;
}
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
int id = nextId_++;
auto t = std::make_shared<TensorsInfo>(handle, id);
map_by_id_[id] = t;
TensorsInfo* TensorsInfoManager::CreateTensorsInfo(ml_tensors_info_h handle) {
ScopeLogger();
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
int id = nextId_++;
auto t = std::make_shared<TensorsInfo>(handle, id);
+
map_by_id_[id] = t;
map_by_handle_[handle] = t;
return nullptr;
}
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
int id = nextId_++;
auto t = src->CreateClone(id);
if (nullptr == t) {
TensorsInfo* TensorsInfoManager::GetTensorsInfo(int id) {
ScopeLogger("id: %d", id);
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
if (map_by_id_.end() != map_by_id_.find(id)) {
return map_by_id_[id].get();
}
TensorsInfo* TensorsInfoManager::GetTensorsInfo(ml_tensors_info_h handle) {
ScopeLogger();
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
if (map_by_handle_.end() != map_by_handle_.find(handle)) {
return map_by_handle_[handle].get();
}
return PlatformResult(ErrorCode::ABORT_ERR);
}
+ std::lock_guard<std::mutex> lock{maps_and_next_id_mutex_};
map_by_handle_.erase(t->Handle());
map_by_id_.erase(t->Id());
#define __ML_TENSORS_INFO_MANAGER_H__
#include <map>
+#include <mutex>
#include <vector>
#include "common/logger.h"
private:
TensorsInfoManager(TensorsInfoManager const&) = delete;
TensorsInfoManager& operator=(TensorsInfoManager const&) = delete;
+
+ /*
+ * For performance reasons and simplicity we use a single mutex
+ * to lock both maps and nextId_. They are often used together
+ * and we'd have to lock all of them anyway.
+ */
+ std::mutex maps_and_next_id_mutex_;
std::map<int, std::shared_ptr<TensorsInfo>> map_by_id_;
std::map<ml_tensors_info_h, std::shared_ptr<TensorsInfo>> map_by_handle_;
int nextId_;