#include "drm-uapi/i915_drm.h"
#include "iris/iris_bufmgr.h"
+#include "iris_batch.h"
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
return i915_gem_mmap_legacy(bufmgr, bo);
}
+/* Ask the i915 kernel driver whether a GPU reset touched this batch's
+ * hardware context, and map the answer onto a Gallium reset status.
+ *
+ * Returns PIPE_GUILTY_CONTEXT_RESET if a batch from this context was
+ * executing when the reset was observed, PIPE_INNOCENT_CONTEXT_RESET if one
+ * was merely queued, and PIPE_NO_RESET otherwise.  A failed ioctl is only
+ * logged; stats stays zero-initialized, so the result is PIPE_NO_RESET.
+ */
+static enum pipe_reset_status
+i915_batch_check_for_reset(struct iris_batch *batch)
+{
+ struct iris_screen *screen = batch->screen;
+ enum pipe_reset_status status = PIPE_NO_RESET;
+ struct drm_i915_reset_stats stats = { .ctx_id = batch->ctx_id };
+
+ if (intel_ioctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
+ DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));
+
+ if (stats.batch_active != 0) {
+ /* A reset was observed while a batch from this hardware context was
+ * executing. Assume that this context was at fault.
+ */
+ status = PIPE_GUILTY_CONTEXT_RESET;
+ } else if (stats.batch_pending != 0) {
+ /* A reset was observed while a batch from this context was in progress,
+ * but the batch was not executing. In this case, assume that the
+ * context was not at fault.
+ */
+ status = PIPE_INNOCENT_CONTEXT_RESET;
+ }
+
+ return status;
+}
+
/* Return the i915 kernel-mode-driver backend vtable.  The table is a
 * function-local static, so every caller shares one immutable instance.
 */
const struct iris_kmd_backend *i915_get_backend(void)
{
static const struct iris_kmd_backend i915_backend = {
.bo_madvise = i915_bo_madvise,
.bo_set_caching = i915_bo_set_caching,
.gem_mmap = i915_gem_mmap,
+ .batch_check_for_reset = i915_batch_check_for_reset,
};
return &i915_backend;
}
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
+#include "iris_kmd_backend.h"
#include "iris_utrace.h"
#include "drm-uapi/i915_drm.h"
iris_batch_check_for_reset(struct iris_batch *batch)
{
struct iris_screen *screen = batch->screen;
- enum pipe_reset_status status = PIPE_NO_RESET;
- struct drm_i915_reset_stats stats = { .ctx_id = batch->ctx_id };
-
- if (intel_ioctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
- DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));
-
- if (stats.batch_active != 0) {
- /* A reset was observed while a batch from this hardware context was
- * executing. Assume that this context was at fault.
- */
- status = PIPE_GUILTY_CONTEXT_RESET;
- } else if (stats.batch_pending != 0) {
- /* A reset was observed while a batch from this context was in progress,
- * but the batch was not executing. In this case, assume that the
- * context was not at fault.
- */
- status = PIPE_INNOCENT_CONTEXT_RESET;
- }
+ struct iris_bufmgr *bufmgr = screen->bufmgr;
+ const struct iris_kmd_backend *backend;
+ enum pipe_reset_status status;
+ backend = iris_bufmgr_get_kernel_driver_backend(bufmgr);
+ status = backend->batch_check_for_reset(batch);
if (status != PIPE_NO_RESET) {
/* Our context is likely banned, or at least in an unknown state.
* Throw it away and start with a fresh context. Ideally this may
{
return &bufmgr->devinfo;
}
+
+/* Accessor for the bufmgr's kernel-driver backend vtable, for callers that
+ * cannot see the definition of struct iris_bufmgr (it is opaque outside
+ * iris_bufmgr.c).
+ */
+const struct iris_kmd_backend *
+iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr)
+{
+ return bufmgr->kmd_backend;
+}
uint64_t iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr);
const struct intel_device_info *iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr);
+const struct iris_kmd_backend *
+iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr);
enum iris_madvice {
IRIS_MADVICE_WILL_NEED = 0,
#include "dev/intel_device_info.h"
#include "dev/intel_kmd.h"
+struct iris_batch;
struct iris_bo;
struct iris_bufmgr;
enum iris_heap;
bool (*bo_madvise)(struct iris_bo *bo, enum iris_madvice state);
int (*bo_set_caching)(struct iris_bo *bo, bool cached);
void *(*gem_mmap)(struct iris_bufmgr *bufmgr, struct iris_bo *bo);
+ enum pipe_reset_status (*batch_check_for_reset)(struct iris_batch *batch);
};
const struct iris_kmd_backend *