 	list_for_each_entry(entry, &reclaim_list, list) {
 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
-		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
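In the control-queue reclaim loop above (in virtio_gpu_dequeue_ctrl_func()), the response tracepoint now carries the sequence number that was recorded when the command was submitted, so a response event can be paired with its queue event. That pairing relies on a per-queue counter plus a per-buffer copy of it; a minimal sketch of the supporting fields, assuming they are added to virtgpu_drv.h elsewhere in this patch (surrounding fields abbreviated):

	struct virtio_gpu_queue {
		struct virtqueue *vq;
		spinlock_t qlock;
		wait_queue_head_t ack_queue;
		struct work_struct dequeue_work;
		uint32_t seqno;		/* last sequence number handed out on this queue */
	};

	struct virtio_gpu_vbuffer {
		char *buf;
		int size;
		/* ... */
		uint32_t seqno;		/* queue seqno captured at submit time, echoed
					 * back by the response tracepoint above */
	};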
 	spin_unlock(&vgdev->cursorq.qlock);
 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+		struct virtio_gpu_ctrl_hdr *resp =
+			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
 		list_del(&entry->list);
 		free_vbuf(vgdev, entry);
 	}
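The cursor-queue reclaim loop previously freed its buffers without emitting any trace; it now derives resp from entry->resp_buf and reports the response with the matching seqno. All four call sites in this patch assume the tracepoint takes a third argument, which means the shared event class in virtgpu_trace.h must grow a seqno field as well. A simplified sketch of that change (the driver's real class records more fields than shown here):

	DECLARE_EVENT_CLASS(virtio_gpu_cmd,
		TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr,
			 u32 seqno),
		TP_ARGS(vq, hdr, seqno),
		TP_STRUCT__entry(
			__field(int, dev)
			__field(unsigned int, vq)
			__field(u32, type)
			__field(u32, seqno)
		),
		TP_fast_assign(
			__entry->dev = vq->vdev->index;
			__entry->vq = vq->index;
			__entry->type = le32_to_cpu(hdr->type);
			__entry->seqno = seqno;
		),
		TP_printk("vdev=%d vq=%u type=0x%x seqno=%u",
			  __entry->dev, __entry->vq, __entry->type,
			  __entry->seqno)
	);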
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 	WARN_ON(ret);
-	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+	vbuf->seqno = ++vgdev->ctrlq.seqno;
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
 	atomic_inc(&vgdev->pending_commands);
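On the control-queue submit side, the sequence number is handed out immediately after the buffer is added to the virtqueue. Note the counter is bumped with a bare pre-increment; that is only safe because this section runs under the queue's lock. A sketch of the locking context, assuming the shape of virtio_gpu_queue_ctrl_sgs() (error and fence handling omitted):

	spin_lock(&vgdev->ctrlq.qlock);

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	/* qlock serializes both the virtqueue insertion and the counter bump,
	 * so seqnos are handed out in submission order without atomics. */
	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);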
 		spin_lock(&vgdev->cursorq.qlock);
 		goto retry;
 	} else {
+		vbuf->seqno = ++vgdev->cursorq.seqno;
 		trace_virtio_gpu_cmd_queue(vq,
-					   virtio_gpu_vbuf_ctrl_hdr(vbuf));
+					   virtio_gpu_vbuf_ctrl_hdr(vbuf),
+					   vbuf->seqno);
 		notify = virtqueue_kick_prepare(vq);
 	}
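The cursor submit path gets the same treatment with its own counter: ctrlq and cursorq each maintain an independent seqno under their respective qlock, so sequence numbers are only meaningful within a single virtqueue, and queue/response events should be matched on the (vq, seqno) pair rather than on seqno alone. The u32 counters simply wrap on overflow, which is harmless for trace correlation. Both events belong to the virtio_gpu trace system, so they can be enabled together from tracefs (events/virtio_gpu/enable).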