}
typedef void (*render_copyfunc_t)(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y);
render_copyfunc_t get_render_copyfunc(int devid);
void gen8_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y);
void gen7_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y);
void gen6_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y);
void gen3_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y);
void gen2_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y);
}
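For reference, a minimal caller sketch of the new render_copyfunc_t signature (not part of the patch): devid, bufmgr, batch, src and dst are assumed to be set up elsewhere with the headers these files already use, and drm_intel_gem_context_create()/drm_intel_gem_context_destroy() are the libdrm hardware-context helpers. A NULL context selects the default context, matching the call sites converted to pass NULL later in this patch.

	render_copyfunc_t copy = get_render_copyfunc(devid);
	drm_intel_context *context = drm_intel_gem_context_create(bufmgr);

	igt_assert(copy);
	/* context may be NULL if the kernel lacks hw context support;
	 * the copy then runs on the default context, as before. */
	copy(batch, context, &src, 0, 0, width, height, &dst, 0, 0);
	if (context)
		drm_intel_gem_context_destroy(context);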
static void
-gen6_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
+gen6_render_flush(struct intel_batchbuffer *batch,
+ drm_intel_context *context, uint32_t batch_end)
{
int ret;
ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
if (ret == 0)
- ret = drm_intel_bo_mrb_exec(batch->bo, batch_end,
- NULL, 0, 0, 0);
+ ret = drm_intel_gem_bo_context_exec(batch->bo, context,
+ batch_end, 0);
assert(ret == 0);
}
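The flush helpers now execute the batch through drm_intel_gem_bo_context_exec() instead of drm_intel_bo_mrb_exec(). A rough equivalence sketch follows, under the assumption that libdrm maps a NULL context to the default context (id 0), so converted call sites that pass NULL keep the old behaviour; exec_with_context() is a hypothetical name used only for illustration, not something the patch adds.

	static void
	exec_with_context(struct intel_batchbuffer *batch,
			  drm_intel_context *context, uint32_t batch_end)
	{
		int ret;

		if (context)
			/* run the batch on the supplied hw context */
			ret = drm_intel_gem_bo_context_exec(batch->bo, context,
							    batch_end, 0);
		else
			/* old path: no cliprects, default context */
			ret = drm_intel_bo_mrb_exec(batch->bo, batch_end,
						    NULL, 0, 0, 0);
		assert(ret == 0);
	}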
}
void gen6_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
uint32_t cc_vp, cc_blend, offset;
uint32_t batch_end;
- intel_batchbuffer_flush(batch);
+ intel_batchbuffer_flush_with_context(batch, context);
batch->ptr = batch->buffer + 1024;
batch_alloc(batch, 64, 64);
emit_vertex_normalized(batch, src_x, buf_width(src));
emit_vertex_normalized(batch, src_y, buf_height(src));
- gen6_render_flush(batch, batch_end);
+ gen6_render_flush(batch, context, batch_end);
intel_batchbuffer_reset(batch);
}
}
static void
-gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
+gen7_render_flush(struct intel_batchbuffer *batch,
+ drm_intel_context *context, uint32_t batch_end)
{
int ret;
ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
if (ret == 0)
- ret = drm_intel_bo_mrb_exec(batch->bo, batch_end,
- NULL, 0, 0, 0);
+ ret = drm_intel_gem_bo_context_exec(batch->bo, context,
+ batch_end, 0);
assert(ret == 0);
}
#define BATCH_STATE_SPLIT 2048
void gen7_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
{
uint32_t batch_end;
- intel_batchbuffer_flush(batch);
+ intel_batchbuffer_flush_with_context(batch, context);
batch->state = &batch->buffer[BATCH_STATE_SPLIT];
batch_end = ALIGN(batch_end, 8);
assert(batch_end < BATCH_STATE_SPLIT);
- gen7_render_flush(batch, batch_end);
+ gen7_render_flush(batch, context, batch_end);
intel_batchbuffer_reset(batch);
}
}
static void
-gen6_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
+gen6_render_flush(struct intel_batchbuffer *batch,
+ drm_intel_context *context, uint32_t batch_end)
{
int ret;
ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
if (ret == 0)
- ret = drm_intel_bo_mrb_exec(batch->bo, batch_end,
- NULL, 0, 0, 0);
+ ret = drm_intel_gem_bo_context_exec(batch->bo, context,
+ batch_end, 0);
assert(ret == 0);
}
#define BATCH_STATE_SPLIT 2048
void gen8_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
uint32_t vertex_buffer;
uint32_t batch_end;
- intel_batchbuffer_flush(batch);
+ intel_batchbuffer_flush_with_context(batch, context);
batch_align(batch, 8);
annotation_flush(&aub_annotations, batch);
- gen6_render_flush(batch, batch_end);
+ gen6_render_flush(batch, context, batch_end);
intel_batchbuffer_reset(batch);
}
}
void gen2_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
#include "rendercopy.h"
void gen3_render_copyfunc(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned width, unsigned height,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
if (uncontexted) {
igt_assert(rendercopy);
- rendercopy(batch, &src, 0, 0, 0, 0, &dst, 0, 0);
+ rendercopy(batch, NULL, &src, 0, 0, 0, 0, &dst, 0, 0);
} else {
int ret;
ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, true);
}
- render_copy(batch,
+ render_copy(batch, NULL,
&src, 0, 0, WIDTH, HEIGHT,
&dst, WIDTH / 2, HEIGHT / 2);
dst.tiling = I915_TILING_NONE;
dst.size = SIZE;
- render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+ render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
start_val[(i + 1) % count] = start_val[i % count];
}
for (i = 0; i < count; i++)
dst.tiling = I915_TILING_NONE;
dst.size = SIZE;
- render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+ render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
start_val[i % count] = start_val[(i + 1) % count];
}
for (i = 0; i < count; i++)
dst.tiling = I915_TILING_NONE;
dst.size = SIZE;
- render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+ render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
start_val[d] = start_val[s];
}
for (i = 0; i < count; i++)
int src = i % count;
int dst = (i + 1) % count;
- render_copy(batch, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
+ render_copy(batch, NULL, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
start_val[dst] = start_val[src];
}
for (i = 0; i < count; i++)
int src = (i + 1) % count;
int dst = i % count;
- render_copy(batch, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
+ render_copy(batch, NULL, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
start_val[dst] = start_val[src];
}
for (i = 0; i < count; i++)
if (src == dst)
continue;
- render_copy(batch, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
+ render_copy(batch, NULL, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
start_val[dst] = start_val[src];
}
for (i = 0; i < count; i++)
igt_assert(y < height);
/* Dummy load to fill the ring */
- copy(batch, &src, 0, 0, width, height, &tmp, 0, 0);
+ copy(batch, NULL, &src, 0, 0, width, height, &tmp, 0, 0);
/* And copy the src into dst, pixel by pixel */
- copy(batch, &src, x, y, 1, 1, &dst, x, y);
+ copy(batch, NULL, &src, x, y, 1, 1, &dst, x, y);
}
/* verify */
}
static void blt_copy(struct intel_batchbuffer *batch,
+ drm_intel_context *context,
struct scratch_buf *src, unsigned src_x, unsigned src_y,
unsigned w, unsigned h,
struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
static int warned = 0;
if (rendercopy) {
- rendercopy(batch_3d,
+ rendercopy(batch_3d, NULL,
src, src_x, src_y,
width, height,
dst, dst_x, dst_y);
keep_gpu_busy();
if (rendercopy)
- rendercopy(batch, src, src_x, src_y,
+ rendercopy(batch, NULL, src, src_x, src_y,
options.tile_size, options.tile_size,
dst, dst_x, dst_y);
else
for (i = 0; i < limit; i++) {
struct scratch_buf *tmp;
- copyfunc(batch,
+ copyfunc(batch, NULL,
src, 0, 0,
o->fb_width, o->fb_height,
dst, 0, 0);