*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
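+ /* one lock per CTB, so the send and recv paths are serialized independently */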
+ spin_lock_init(&ct->ctbs.send.lock);
+ spin_lock_init(&ct->ctbs.recv.lock);
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
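+
+ /*
+ * Serialize the whole send sequence under the send CTB lock with
+ * interrupts disabled, so that requests.lock can nest inside it
+ * below without its own irqsave.
+ */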
+ spin_lock_irqsave(&ct->ctbs.send.lock, flags);
+
fence = ct_get_next_fence(ct);
request.fence = fence;
request.status = 0;
request.response_len = response_buf_size;
request.response_buf = response_buf;
- spin_lock_irqsave(&ct->requests.lock, flags);
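+ /* irqs are already disabled by the send-lock irqsave above */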
+ spin_lock(&ct->requests.lock);
list_add_tail(&request.link, &ct->requests.pending);
- spin_unlock_irqrestore(&ct->requests.lock, flags);
+ spin_unlock(&ct->requests.lock);
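+
+ /*
+ * The request is on the pending list before the write reaches the
+ * GuC, so a response that arrives immediately can still be matched
+ * to its fence.
+ */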
err = ct_write(ct, action, len, fence);
+
+ spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
+
if (unlikely(err))
goto unlink;
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+ unsigned long flags;
int err = 0;
if (unlikely(!ct->enabled)) {
WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
}
do {
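+ /* ct_read() consumes one message and updates the recv descriptor head */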
+ spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
err = ct_read(ct, msg);
+ spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
if (err)
break;
* record (command transport buffer descriptor) and the actual buffer which
* holds the commands.
*
+ * @lock: protects access to the commands buffer and buffer descriptor
* @desc: pointer to the buffer descriptor
* @cmds: pointer to the commands buffer
* @size: size of the commands buffer
*/
struct intel_guc_ct_buffer {
+ spinlock_t lock;
struct guc_ct_buffer_desc *desc;
u32 *cmds;
u32 size;