assert(!_ready_jobs.empty()); // Cannot begin if there is no initial jobs
bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);
- // Notifiy Execution Begin
- for (auto &o : _observers)
- {
- o->handleBegin(this);
- }
+ _subject.notifyModelBegin(this);
while (!_ready_jobs.empty())
{
_ready_jobs.erase(_ready_jobs.begin());
auto job_index = job->index();
VERBOSE(DataflowExecutor) << "Run job #" << job_index << std::endl;
- notifyJobBegin(job_index);
+
+ auto subgraph_index = _job_to_subgraph[job_index];
+ // Workaround - assumes only one operation in a subgraph
+ auto op = _subgraphs->at(subgraph_index).operations().at(0).node;
+ const backend::Backend *backend = _lower_info->operation.at(subgraph_index)->backend();
+
+ _subject.notifyJobBegin(this, op, backend);
+
if (is_profiling)
job->fn()->runSync();
else
job->run();
- notifyJobEnd(job_index);
+
+ _subject.notifyJobEnd(this, op, backend);
notify(job_index);
_finished_jobs[job_index] = std::move(job);
}
assert(noWaitingJobs());
- for (auto &o : _observers)
- {
- o->handleEnd(this);
- }
+ _subject.notifyModelEnd(this);
// Reset input info for the next execution
_input_info = _initial_input_info;
}
-void DataflowExecutor::notifyJobBegin(uint32_t job_index)
-{
- auto subgraph_index = _job_to_subgraph[job_index];
- // Workaround - assumes only one operation
- auto node = _subgraphs->at(subgraph_index).operations().at(0).node;
- const backend::Backend *backend = _lower_info->operation.at(subgraph_index)->backend();
- for (auto &o : _observers)
- {
- o->handleBegin(this, node, backend);
- }
-}
-
-void DataflowExecutor::notifyJobEnd(uint32_t job_index)
-{
- auto subgraph_index = _job_to_subgraph[job_index];
- // Workaround - assumes only one operation
- auto node = _subgraphs->at(subgraph_index).operations().at(0).node;
- const backend::Backend *backend = _lower_info->operation.at(subgraph_index)->backend();
- for (auto &o : _observers)
- {
- o->handleEnd(this, node, backend);
- }
-}
-
} // namespace exec
} // namespace neurun
void executeImpl() override;
- void notifyJobEnd(uint32_t job_index);
- void notifyJobBegin(uint32_t job_index);
-
- void addObserver(std::unique_ptr<IExecutionObserver> ref)
- {
- _observers.emplace_back(std::move(ref));
- };
- void removeObserver(std::unique_ptr<IExecutionObserver> ref) { _observers.remove(ref); };
-
protected:
int64_t calculateRank(const std::vector<model::Element> &operations);
void emplaceToReadyJobs(const uint32_t &id);
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecutionObservee.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+// Register an observer; the observee takes ownership of it.
+// NOTE(review): there is no corresponding remove(); observers live until the observee is destroyed.
+void ExecutionObservee::add(std::unique_ptr<IExecutionObserver> observer)
+{
+  _observers.emplace_back(std::move(observer));
+}
+
+// Broadcast "model execution has begun" to every registered observer,
+// in registration order.
+void ExecutionObservee::notifyModelBegin(IExecutor *executor)
+{
+  for (auto &o : _observers)
+  {
+    o->handleBegin(executor);
+  }
+}
+
+// Broadcast "model execution has finished" to every registered observer,
+// in registration order.
+void ExecutionObservee::notifyModelEnd(IExecutor *executor)
+{
+  for (auto &o : _observers)
+  {
+    o->handleEnd(executor);
+  }
+}
+
+// Broadcast "a job is about to run" to every registered observer,
+// passing the operation being executed and the backend it runs on.
+void ExecutionObservee::notifyJobBegin(IExecutor *executor, const model::Operation *operation,
+                                       const backend::Backend *backend)
+{
+  for (auto &o : _observers)
+  {
+    o->handleBegin(executor, operation, backend);
+  }
+}
+
+// Broadcast "a job has finished running" to every registered observer,
+// passing the operation that was executed and the backend it ran on.
+void ExecutionObservee::notifyJobEnd(IExecutor *executor, const model::Operation *operation,
+                                     const backend::Backend *backend)
+{
+  for (auto &o : _observers)
+  {
+    o->handleEnd(executor, operation, backend);
+  }
+}
+
+} // namespace exec
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_EXECUTION_OBSERVEE_H__
+#define __NEURUN_EXEC_EXECUTION_OBSERVEE_H__
+
+#include <list>
+
+#include "exec/ExecutionObservers.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+/**
+ * @brief Class that holds a set of execution observers and forwards
+ *        execution events (model begin/end, job begin/end) to all of them
+ */
+class ExecutionObservee
+{
+public:
+  /**
+   * @brief Register an observer
+   *
+   * @param observer Observer to be added (ownership is transferred)
+   */
+  void add(std::unique_ptr<IExecutionObserver> observer);
+  /**
+   * @brief Notify all observers that model execution has begun
+   *
+   * @param executor Executor that begins execution
+   */
+  void notifyModelBegin(IExecutor *executor);
+  /**
+   * @brief Notify all observers that model execution has finished
+   *
+   * @param executor Executor that finished execution
+   */
+  void notifyModelEnd(IExecutor *executor);
+  /**
+   * @brief Notify all observers that a job is about to run
+   *
+   * @param executor  Executor that runs the job
+   * @param operation Operation the job corresponds to
+   * @param backend   Backend the job runs on
+   */
+  void notifyJobBegin(IExecutor *executor, const model::Operation *operation,
+                      const backend::Backend *backend);
+  /**
+   * @brief Notify all observers that a job has finished running
+   *
+   * @param executor  Executor that ran the job
+   * @param operation Operation the job corresponds to
+   * @param backend   Backend the job ran on
+   */
+  void notifyJobEnd(IExecutor *executor, const model::Operation *operation,
+                    const backend::Backend *backend);
+
+private:
+  // Observers are notified in registration order
+  std::list<std::unique_ptr<IExecutionObserver>> _observers;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_EXECUTION_OBSERVEE_H__
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
std::unique_ptr<backend::TensorManagerSet> tensor_mgrs)
- : _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
- _operand_context{operand_context}, _lower_info{std::move(lower_info)},
- _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
+ : _model{model}, _subgraphs{std::move(subgraphs)}, _operand_context{operand_context},
+ _lower_info{std::move(lower_info)}, _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
{
// DO NOTHING
}
#include "backend/ExecTime.h"
#include "exec/IFunction.h"
#include "backend/ITensorManager.h"
+#include "exec/ExecutionObservee.h"
#include <list>
namespace neurun
virtual void executeImpl(void) = 0;
+ void addObserver(std::unique_ptr<IExecutionObserver> ref) { _subject.add(std::move(ref)); };
+
private:
std::unique_ptr<ISource> source(const model::IOIndex &index, const model::TypeInfo &type,
const void *buffer, size_t length, model::Layout io_layout);
}
protected:
- std::list<std::unique_ptr<IExecutionObserver>> _observers;
+ ExecutionObservee _subject;
std::shared_ptr<model::OperationIndexMap<int64_t>> _indexed_ranks;
std::shared_ptr<const model::Model> _model;
std::unique_ptr<model::Subgraphs> _subgraphs;