/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
void set_num_threads_with_affinity(unsigned int num_threads, BindFunc func) override;
unsigned int num_threads() const override;
void schedule(ICPPKernel *kernel, const Hints &hints) override;
- void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override;
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) override;
protected:
/** Will run the workloads in parallel using num_threads
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
{
class ICPPKernel;
class ITensor;
+class Window;
/** Scheduler interface to run kernels */
class IScheduler
*
* @param[in] kernel Kernel to execute.
* @param[in] hints Hints for the scheduler.
+ * @param[in] window Window to use for kernel execution.
* @param[in] tensors Vector containing the tensors to operate on.
*/
- virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) = 0;
+ virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) = 0;
/** Execute all the passed workloads
*
virtual void run_workloads(std::vector<Workload> &workloads) = 0;
CPUInfo _cpu_info;
- void schedule_common(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors);
+ /** Common scheduler logic to execute the given kernel
+ *
+ * @param[in] kernel Kernel to execute.
+ * @param[in] hints Hints for the scheduler.
+ * @param[in] window Window to use for kernel execution.
+ * @param[in] tensors Vector containing the tensors to operate on.
+ */
+ void schedule_common(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors);
private:
unsigned int _num_threads_hint = {};
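For orientation, here is a caller-side sketch of the extended interface, consistent with the call-site updates further down in this patch. The operator name and members (`ExampleOperator`, `_kernel`, `_src`, `_dst`) are illustrative only and not part of the change:

    void ExampleOperator::run()
    {
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, _src);
        pack.add_tensor(TensorType::ACL_DST, _dst);
        // The execution window is now passed explicitly next to the tensor pack;
        // forwarding the kernel's own maximal window reproduces the previous
        // implicit behaviour.
        NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), pack);
    }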
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
*
* @param[in] kernel Kernel to execute.
* @param[in] hints Hints for the scheduler.
+ * @param[in] window Window to use for kernel execution.
* @param[in] tensors Vector containing the tensors to operate on.
*/
- void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override;
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) override;
protected:
/** Execute all the passed workloads
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
*
* @param[in] kernel Kernel to execute.
* @param[in] hints Hints for the scheduler.
+ * @param[in] window Window to use for kernel execution.
* @param[in] tensors Vector containing the tensors to operate on.
*/
- void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override;
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) override;
protected:
/** Will run the workloads sequentially and in order.
}
#endif /* DOXYGEN_SKIP_THIS */
-void CPPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors)
+void CPPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
{
- schedule_common(kernel, hints, tensors);
+ schedule_common(kernel, hints, window, tensors);
}
void CPPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
{
ITensorPack tensors;
- schedule_common(kernel, hints, tensors);
+ schedule_common(kernel, hints, kernel->window(), tensors);
}
} // namespace arm_compute
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
kernel->run(kernel->window(), info);
}
-void SingleThreadScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors)
+void SingleThreadScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
{
ARM_COMPUTE_UNUSED(hints);
ThreadInfo info;
info.cpu_info = &_cpu_info;
- kernel->run_op(tensors, kernel->window(), info);
+ kernel->run_op(tensors, window, info);
}
void SingleThreadScheduler::run_workloads(std::vector<Workload> &workloads)
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
return _num_threads_hint;
}
-void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors)
+void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
- ARM_COMPUTE_UNUSED(kernel);
- ARM_COMPUTE_UNUSED(hints);
- ARM_COMPUTE_UNUSED(tensors);
#ifndef BARE_METAL
- const Window &max_window = kernel->window();
+ const Window &max_window = window;
if(hints.split_dimension() == IScheduler::split_dimensions_all)
{
/*
run_workloads(workloads);
}
}
+#else /* !BARE_METAL */
+ ARM_COMPUTE_UNUSED(kernel, hints, window, tensors);
#endif /* !BARE_METAL */
}
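For context, the hunk above elides how the supplied window is actually divided between threads. A minimal sketch of the splitting arithmetic the CPU schedulers rely on is shown below; it mirrors the `num_iterations`/`std::min` logic visible in the OMPScheduler hunk further down, and the local names (`num_threads`, `num_windows`, `t`) are illustrative:

    // The number of parallel workloads is bounded by how many iterations the
    // caller-provided window exposes along the split dimension from the hints.
    const unsigned int num_iterations = window.num_iterations(hints.split_dimension());
    const unsigned int num_windows    = std::min(num_iterations, num_threads);
    for(unsigned int t = 0; t < num_windows; ++t)
    {
        // Each thread executes one sub-window of the caller-provided window,
        // i.e. kernel->run_op(tensors, win, info) on thread t.
        Window win = window.split_window(hints.split_dimension(), t, num_windows);
    }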
ARM_COMPUTE_ERROR("No inputs provided");
}
- NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, tensors);
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
}
void INEOperator::prepare(ITensorPack &constants)
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
void NELogicalAnd::run()
{
- NEScheduler::get().schedule_op(_impl->kernel.get(), Window::DimY, _impl->pack);
+ NEScheduler::get().schedule_op(_impl->kernel.get(), Window::DimY, _impl->kernel->window(), _impl->pack);
}
struct NELogicalOr::Impl : public LogicalArgs
void NELogicalOr::run()
{
- NEScheduler::get().schedule_op(_impl->kernel.get(), Window::DimY, _impl->pack);
+ NEScheduler::get().schedule_op(_impl->kernel.get(), Window::DimY, _impl->kernel->window(), _impl->pack);
}
struct NELogicalNot::Impl : public LogicalArgs
void NELogicalNot::run()
{
- NEScheduler::get().schedule_op(_impl->kernel.get(), Window::DimY, _impl->pack);
+ NEScheduler::get().schedule_op(_impl->kernel.get(), Window::DimY, _impl->kernel->window(), _impl->pack);
}
} // namespace arm_compute
if(_is_global_pooling_layer)
{
- NEScheduler::get().schedule_op(_kernel.get(), Window::DimX, tensors);
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimX, _kernel->window(), tensors);
}
else
{
- NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, tensors);
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
}
}
} // namespace experimental
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
void OMPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
{
ITensorPack tensors;
- schedule_common(kernel, hints, tensors);
+ schedule_common(kernel, hints, kernel->window(), tensors);
}
-void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors)
+void OMPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
{
ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
ARM_COMPUTE_ERROR_ON_MSG(hints.strategy() == StrategyHint::DYNAMIC,
"Dynamic scheduling is not supported in OMPScheduler");
- const Window &max_window = kernel->window();
+ const Window &max_window = window;
const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
const unsigned int num_threads = std::min(num_iterations, _num_threads);
ITensorPack pack;
pack.add_tensor(TensorType::ACL_SRC, tensors.get_const_tensor(ACL_SRC_VEC + i));
pack.add_tensor(TensorType::ACL_DST, tensors.get_tensor(ACL_DST));
- NEScheduler::get().schedule_op(k.get(), Window::DimY, pack);
+ NEScheduler::get().schedule_op(k.get(), Window::DimY, k->window(), pack);
++i;
}
}
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
_kernels.push_back(std::move(info));
}
- void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) override
{
_timer.start();
- _real_scheduler.schedule_op(kernel, hints, tensors);
+ _real_scheduler.schedule_op(kernel, hints, window, tensors);
_timer.stop();
typename SchedulerClock<output_timestamps>::kernel_info info;