stream_ptr += pvr_cmd_length(CR_ISP_DBIAS_BASE);
pvr_csb_pack ((uint64_t *)stream_ptr, CR_ISP_OCLQRY_BASE, value) {
- value.addr = PVR_DEV_ADDR_INVALID;
+ const struct pvr_sub_cmd_gfx *sub_cmd =
+ container_of(job, const struct pvr_sub_cmd_gfx, job);
+
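+ /* Point the ISP occlusion query base at the sub-command's query pool
+  * result buffer when one is attached; otherwise mark it invalid.
+  */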
+ if (sub_cmd->query_pool)
+ value.addr = sub_cmd->query_pool->result_buffer->dev_addr;
+ else
+ value.addr = PVR_DEV_ADDR_INVALID;
}
stream_ptr += pvr_cmd_length(CR_ISP_OCLQRY_BASE);
/* The size of the barriers_needed array is based on the number of sync
 * pipeline stages.
 */
- uint32_t barriers_needed[4];
+ uint32_t barriers_needed[PVR_NUM_SYNC_PIPELINE_STAGES];
struct pvr_descriptor_state gfx_desc_state;
struct pvr_descriptor_state compute_desc_state;
* device.
*/
/* TODO: Handle device loss scenario properly. */
-static bool pvr_wait_for_available(struct pvr_device *device,
- const struct pvr_query_pool *pool,
- uint32_t query_idx)
+static VkResult pvr_wait_for_available(struct pvr_device *device,
+ const struct pvr_query_pool *pool,
+ uint32_t query_idx)
{
const uint64_t abs_timeout =
os_time_get_absolute_timeout(PVR_WAIT_TIMEOUT * NSEC_PER_SEC);
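With the return type changed from bool to VkResult, a caller can propagate the wait error directly instead of collapsing it to a boolean. A minimal sketch of such a caller, assuming a VK_QUERY_RESULT_WAIT_BIT-style path (the surrounding variables are illustrative, not part of the patch):

/* Hypothetical caller: forward the VkResult from the wait. */
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
   VkResult result = pvr_wait_for_available(device, pool, query_idx);
   if (result != VK_SUCCESS)
      return result;
}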
PVR_COPY_QUERY_POOL_RESULTS_COUNT);
/* Assert that memory is bound to the destination buffer. */
- assert(buffer->dev_addr.addr == 0);
+ assert(buffer->dev_addr.addr);
addr = buffer->dev_addr;
addr.addr += query_info->copy_query_results.dst_offset;