* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * 'pvr_write_query_to_buffer()' based on anv:
+ * 'pvr_write_query_to_buffer()' and 'pvr_wait_for_available()' based on anv:
* Copyright © 2015 Intel Corporation
*/
#include "pvr_device_info.h"
#include "pvr_private.h"
#include "util/macros.h"
+#include "util/os_time.h"
#include "vk_log.h"
#include "vk_object.h"
vk_object_free(&device->vk, pAllocator, pool);
}
+/* Check whether the device has marked the query at 'query_idx' as available.
+ *
+ * Note: make sure to make the availability buffer's memory defined in
+ * accordance to how the device is expected to fill it. We don't make it defined
+ * here since that would cover up usage of this function while the underlying
+ * buffer region being accessed wasn't expected to have been written by the
+ * device.
+ */
+static inline bool pvr_query_is_available(const struct pvr_query_pool *pool,
+                                          uint32_t query_idx)
+{
+   /* volatile: the device writes this mapped buffer asynchronously. */
+   volatile uint32_t *available = pool->availability_buffer->bo->map;
+   return !!available[query_idx];
+}
+
+#define NSEC_PER_SEC UINT64_C(1000000000)
+#define PVR_WAIT_TIMEOUT UINT64_C(5)
+
+/* Poll until the device marks the query at 'query_idx' as available.
+ *
+ * Note: make sure to make the availability buffer's memory defined in
+ * accordance to how the device is expected to fill it. We don't make it defined
+ * here since that would cover up usage of this function while the underlying
+ * buffer region being accessed wasn't expected to have been written by the
+ * device.
+ *
+ * Returns VK_SUCCESS once the query becomes available, or
+ * VK_ERROR_DEVICE_LOST if the timeout expires first.
+ */
+/* TODO: Handle device loss scenario properly. */
+static VkResult pvr_wait_for_available(struct pvr_device *device,
+                                       const struct pvr_query_pool *pool,
+                                       uint32_t query_idx)
+{
+   const uint64_t abs_timeout =
+      os_time_get_absolute_timeout(PVR_WAIT_TIMEOUT * NSEC_PER_SEC);
+
+   /* From the Vulkan 1.0 spec:
+    *
+    *    Commands that wait indefinitely for device execution (namely
+    *    vkDeviceWaitIdle, vkQueueWaitIdle, vkWaitForFences or
+    *    vkAcquireNextImageKHR with a maximum timeout, and
+    *    vkGetQueryPoolResults with the VK_QUERY_RESULT_WAIT_BIT bit set in
+    *    flags) must return in finite time even in the case of a lost device,
+    *    and return either VK_SUCCESS or VK_ERROR_DEVICE_LOST.
+    */
+   while (os_time_get_nano() < abs_timeout) {
+      if (pvr_query_is_available(pool, query_idx))
+         return VK_SUCCESS;
+   }
+
+   return vk_error(device, VK_ERROR_DEVICE_LOST);
+}
+
+#undef NSEC_PER_SEC
+#undef PVR_WAIT_TIMEOUT
+
static inline void pvr_write_query_to_buffer(uint8_t *buffer,
VkQueryResultFlags flags,
uint32_t idx,
PVR_FROM_HANDLE(pvr_query_pool, pool, queryPool);
PVR_FROM_HANDLE(pvr_device, device, _device);
const uint32_t core_count = device->pdevice->dev_runtime_info.core_count;
- volatile uint32_t *available = pool->availability_buffer->bo->map;
+ VG(volatile uint32_t *available = pool->availability_buffer->bo->map);
volatile uint32_t *query_results = pool->result_buffer->bo->map;
uint8_t *data = (uint8_t *)pData;
VkResult result = VK_SUCCESS;
+   /* TODO: Instead of making the memory defined here for valgrind, to better
+    * catch out of bounds access and other memory errors we should move them
+    * to where the query buffers are changed by the driver or device (e.g.
+    * "vkCmdResetQueryPool()", "vkGetQueryPoolResults()", etc.).
+    */
+
VG(VALGRIND_MAKE_MEM_DEFINED(&available[firstQuery],
queryCount * sizeof(uint32_t)));
}
for (uint32_t i = 0; i < queryCount; i++) {
- const bool is_available = !!available[firstQuery + i];
+ bool is_available = pvr_query_is_available(pool, firstQuery + i);
uint64_t count = 0;
uint32_t idx = 0;
- /* From the Vulkan 1.0 spec:
- *
- * Commands that wait indefinitely for device execution (namely
- * vkDeviceWaitIdle, vkQueueWaitIdle, vkWaitForFences or
- * vkAcquireNextImageKHR with a maximum timeout, and
- * vkGetQueryPoolResults with the VK_QUERY_RESULT_WAIT_BIT bit set in
- * flags) must return in finite time even in the case of a lost device,
- * and return either VK_SUCCESS or VK_ERROR_DEVICE_LOST.
- */
- if (flags & VK_QUERY_RESULT_WAIT_BIT) {
- /* Add support to wait for query results to be available. Also handle
- * device loss scenario.
- */
- pvr_finishme("Unimplemented path.");
+ if (flags & VK_QUERY_RESULT_WAIT_BIT && !is_available) {
+ result = pvr_wait_for_available(device, pool, firstQuery + i);
+ if (result != VK_SUCCESS)
+ return result;
+
+ is_available = true;
}
for (uint32_t j = 0; j < core_count; j++)