// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
8 #include "ivpu_jsm_msg.h"
10 int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
11 u64 jobq_base, u32 jobq_size)
13 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
14 struct vpu_jsm_msg resp;
17 req.payload.register_db.db_idx = db_id;
18 req.payload.register_db.jobq_base = jobq_base;
19 req.payload.register_db.jobq_size = jobq_size;
20 req.payload.register_db.host_ssid = ctx_id;
22 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
23 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
25 ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
29 ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
34 int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
36 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
37 struct vpu_jsm_msg resp;
40 req.payload.unregister_db.db_idx = db_id;
42 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
43 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
45 ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
49 ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
54 int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
56 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
57 struct vpu_jsm_msg resp;
60 if (engine > VPU_ENGINE_COPY)
63 req.payload.query_engine_hb.engine_idx = engine;
65 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
66 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
68 ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
72 *heartbeat = resp.payload.query_engine_hb_done.heartbeat;
76 int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
78 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
79 struct vpu_jsm_msg resp;
82 if (engine > VPU_ENGINE_COPY)
85 req.payload.engine_reset.engine_idx = engine;
87 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
88 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
90 ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);
95 int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
97 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
98 struct vpu_jsm_msg resp;
101 if (engine > VPU_ENGINE_COPY)
104 req.payload.engine_preempt.engine_idx = engine;
105 req.payload.engine_preempt.preempt_id = preempt_id;
107 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
108 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
110 ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
115 int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
117 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
118 struct vpu_jsm_msg resp;
121 strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
123 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
124 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
126 ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);
131 int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
132 u64 *trace_hw_component_mask)
134 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
135 struct vpu_jsm_msg resp;
138 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
139 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
141 ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
145 *trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
146 *trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
151 int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
152 u64 trace_hw_component_mask)
154 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
155 struct vpu_jsm_msg resp;
158 req.payload.trace_config.trace_level = trace_level;
159 req.payload.trace_config.trace_destination_mask = trace_destination_mask;
160 req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
162 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
163 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
165 ivpu_warn(vdev, "Failed to set config: %d\n", ret);
170 int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
172 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
173 struct vpu_jsm_msg resp;
175 req.payload.ssid_release.host_ssid = host_ssid;
177 return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
178 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);