void __user *chunks;
u32 num_chunks;
u64 cs_seq = ULONG_MAX;
- int rc, do_restore;
+ int rc, do_ctx_switch;
bool need_soft_reset = false;
if (hl_device_disabled_or_in_reset(hdev)) {
goto out;
}
- do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+ do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
- if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+ if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
long ret;
chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
mutex_lock(&hpriv->restore_phase_mutex);
- if (do_restore) {
+ if (do_ctx_switch) {
rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
if (rc) {
dev_err_ratelimited(hdev->dev,
}
}
- ctx->thread_restore_wait_token = 1;
- } else if (!ctx->thread_restore_wait_token) {
+ ctx->thread_ctx_switch_wait_token = 1;
+ } else if (!ctx->thread_ctx_switch_wait_token) {
u32 tmp;
rc = hl_poll_timeout_memory(hdev,
- (u64) (uintptr_t) &ctx->thread_restore_wait_token,
+ (u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
jiffies_to_usecs(hdev->timeout_jiffies),
&tmp);
if (rc || !tmp) {
dev_err(hdev->dev,
- "restore phase hasn't finished in time\n");
+ "context switch phase didn't finish in time\n");
rc = -ETIMEDOUT;
goto out;
}
ctx->cs_sequence = 1;
spin_lock_init(&ctx->cs_lock);
- atomic_set(&ctx->thread_restore_token, 1);
- ctx->thread_restore_wait_token = 0;
+ atomic_set(&ctx->thread_ctx_switch_token, 1);
+ ctx->thread_ctx_switch_wait_token = 0;
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
- /* Make sure the setup phase for the user context will run again */
+ /* Make sure the context switch phase will run again */
if (hdev->user_ctx) {
- atomic_set(&hdev->user_ctx->thread_restore_token, 1);
- hdev->user_ctx->thread_restore_wait_token = 0;
+ atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
+ hdev->user_ctx->thread_ctx_switch_wait_token = 0;
}
/* Finished tear-down, starting to re-initialize */
* DRAM mapping.
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
- * @thread_restore_token: token to prevent multiple threads of the same context
- * from running the restore phase. Only one thread
- * should run it.
- * @thread_restore_wait_token: token to prevent the threads that didn't run
- * the restore phase from moving to their execution
- * phase before the restore phase has finished.
+ * @thread_ctx_switch_token: token to prevent multiple threads of the same
+ * context from running the context switch phase.
+ * Only a single thread should run it.
+ * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
+ * the context switch phase from moving to their
+ * execution phase before the context switch phase
+ * has finished.
* @asid: context's unique address space ID in the device's MMU.
*/
struct hl_ctx {
u64 *dram_default_hops;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
- atomic_t thread_restore_token;
- u32 thread_restore_wait_token;
+ atomic_t thread_ctx_switch_token;
+ u32 thread_ctx_switch_wait_token;
u32 asid;
};
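
/*
 * Minimal userspace sketch of the handshake the two tokens above implement.
 * This is an illustration only, not driver code: kctx, ctx_switch() and
 * submit_thread() are invented names, and the driver polls the wait token
 * with a timeout (hl_poll_timeout_memory) instead of busy-waiting. The
 * first thread to flip the switch token from 1 to 0 performs the one-time
 * context switch; every other thread waits on the wait token until the
 * winner marks the switch as finished, then proceeds to its execution phase.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct kctx {
	atomic_int thread_ctx_switch_token;	/* 1 = switch still pending */
	atomic_bool thread_ctx_switch_wait_token;	/* true = switch done */
};

static void ctx_switch(struct kctx *ctx)
{
	/* Stand-in for hdev->asic_funcs->context_switch() */
	printf("performing one-time context switch\n");
}

static void *submit_thread(void *arg)
{
	struct kctx *ctx = arg;
	int expected = 1;

	if (atomic_compare_exchange_strong(&ctx->thread_ctx_switch_token,
					   &expected, 0)) {
		/* Winner: run the switch, then release the waiting threads */
		ctx_switch(ctx);
		atomic_store(&ctx->thread_ctx_switch_wait_token, true);
	} else {
		/* Loser: wait until the winner has finished the switch */
		while (!atomic_load(&ctx->thread_ctx_switch_wait_token))
			;	/* the driver uses a timed poll here */
	}

	printf("thread %lu entering execution phase\n",
	       (unsigned long)pthread_self());
	return NULL;
}

int main(void)
{
	struct kctx ctx = {
		.thread_ctx_switch_token = 1,
		.thread_ctx_switch_wait_token = false,
	};
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, submit_thread, &ctx);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	return 0;
}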