batch->reset = &ice->reset;
batch->state_sizes = ice->state.sizes;
batch->name = name;
+ batch->ice = ice;
batch->fine_fences.uploader =
u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
#define IRIS_BATCH_COUNT 2
struct iris_batch {
+ struct iris_context *ice;
struct iris_screen *screen;
struct pipe_debug_callback *dbg;
struct pipe_device_reset_callback *reset;
* we do need to inform the context of batch catastrophe. Each batch
* stores a pointer back to its owning context, so use that directly.
*/
- struct iris_context *ice = NULL;
+ struct iris_context *ice = batch->ice;
if (batch->name == IRIS_BATCH_RENDER) {
- ice = container_of(batch, struct iris_context, batches[IRIS_BATCH_RENDER]);
- assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
-
batch->screen->vtbl.init_render_context(batch);
} else if (batch->name == IRIS_BATCH_COMPUTE) {
- ice = container_of(batch, struct iris_context, batches[IRIS_BATCH_COMPUTE]);
- assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch);
-
batch->screen->vtbl.init_compute_context(batch);
} else {
unreachable("unhandled batch reset");
if (subslices_delta == 0)
return;
- struct iris_context *ice = container_of(batch, struct iris_context, batches[IRIS_BATCH_RENDER]);
+ struct iris_context *ice = batch->ice;
assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;