}
void
-intel_batchbuffer_save_state(struct intel_context *intel)
-{
- intel->batch.saved.used = intel->batch.used;
- intel->batch.saved.reloc_count =
- drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
-}
-
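-/* Roll the batch back to the point recorded by
- * intel_batchbuffer_save_state(), discarding any commands and
- * relocations added since then.
- */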
-void
-intel_batchbuffer_reset_to_saved(struct intel_context *intel)
-{
- drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
-
- intel->batch.used = intel->batch.saved.used;
-
- /* Cached batch state is dead, since we just cleared some unknown part of the
- * batchbuffer. Assume that the caller resets any other state necessary.
- */
- clear_cache(intel);
-}
-
-void
intel_batchbuffer_free(struct intel_context *intel)
{
free(intel->batch.cpu_map);
drm_intel_bo_unreference(intel->batch.last_bo);
drm_intel_bo_unreference(intel->batch.bo);
- drm_intel_bo_unreference(intel->batch.workaround_bo);
clear_cache(intel);
}
intel->batch.used += bytes >> 2;
}
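-/* Try to elide the state packet just emitted between batch.emit and
- * batch.used: one copy of the last packet is cached per opcode, and if
- * the new packet matches that copy exactly the batch pointer is rewound
- * so the duplicate is never emitted; otherwise the cache entry is
- * updated to point at the new packet.
- */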
-void
-intel_batchbuffer_cached_advance(struct intel_context *intel)
-{
- struct cached_batch_item **prev = &intel->batch.cached_items, *item;
- uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
- uint32_t *start = intel->batch.map + intel->batch.emit;
- uint16_t op = *start >> 16;
-
- while (*prev) {
- uint32_t *old;
-
- item = *prev;
- old = intel->batch.map + item->header;
- if (op == *old >> 16) {
- if (item->size == sz && memcmp(old, start, sz) == 0) {
- if (prev != &intel->batch.cached_items) {
- *prev = item->next;
- item->next = intel->batch.cached_items;
- intel->batch.cached_items = item;
- }
- intel->batch.used = intel->batch.emit;
- return;
- }
-
- goto emit;
- }
- prev = &item->next;
- }
-
- item = malloc(sizeof(struct cached_batch_item));
- if (item == NULL)
- return;
-
- item->next = intel->batch.cached_items;
- intel->batch.cached_items = item;
-
-emit:
- item->size = sz;
- item->header = intel->batch.emit;
-}
-
-/**
- * Restriction [DevSNB, DevIVB]:
- *
- * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
- * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
- * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
- * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
- * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
- * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
- * unless SW can otherwise guarantee that the pipeline from WM onwards is
- * already flushed (e.g., via a preceding MI_FLUSH).
- */
-void
-intel_emit_depth_stall_flushes(struct intel_context *intel)
-{
- assert(intel->gen >= 6 && intel->gen <= 7);
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
-}
-
-/**
- * From the BSpec, volume 2a.03: VS Stage Input / State:
- * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
- * stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
- * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
- * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
- * to be sent before any combination of VS associated 3DSTATE."
- */
-void
-gen7_emit_vs_workaround_flush(struct intel_context *intel)
-{
- assert(intel->gen == 7);
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(intel->batch.workaround_bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
-}
-
-/**
- * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
- * implementing two workarounds on gen6. From section 1.4.7.1
- * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
- *
- * [DevSNB-C+{W/A}] Before any depth stall flush (including those
- * produced by non-pipelined state commands), software needs to first
- * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
- * 0.
- *
- * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
- * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
- *
- * And the workaround for these two requires this workaround first:
- *
- * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
- * BEFORE the pipe-control with a post-sync op and no write-cache
- * flushes.
- *
- * And this last workaround is tricky because of the requirements on
- * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
- * volume 2 part 1:
- *
- * "1 of the following must also be set:
- * - Render Target Cache Flush Enable ([12] of DW1)
- * - Depth Cache Flush Enable ([0] of DW1)
- * - Stall at Pixel Scoreboard ([1] of DW1)
- * - Depth Stall ([13] of DW1)
- * - Post-Sync Operation ([13] of DW1)
- * - Notify Enable ([8] of DW1)"
- *
- * The cache flushes require the workaround flush that triggered this
- * one, so we can't use it. Depth stall would trigger the same.
- * Post-sync nonzero is what triggered this second workaround, so we
- * can't use that one either. Notify enable is IRQs, which aren't
- * really our business. That leaves only stall at scoreboard.
- */
-void
-intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
-{
- if (!intel->batch.need_workaround_flush)
- return;
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(intel->batch.workaround_bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
-
- intel->batch.need_workaround_flush = false;
-}
-
/* Emit a pipelined flush to either flush render and texture cache for
* reading from a FBO-drawn texture, or flush so that frontbuffer
* render appears on the screen in DRI1.
intel->upload.offset = base + size;
}
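-/* Return a CPU pointer to which "size" bytes of upload data can be
- * written at "align" alignment.  Small requests are staged in the
- * fixed-size intel->upload.buffer, larger ones in a temporary malloc'ed
- * block; intel_upload_unmap() commits the data to the upload buffer
- * object.
- */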
-void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
-{
- GLuint base, delta;
- char *ptr;
-
- base = (intel->upload.offset + align - 1) / align * align;
- if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
- wrap_buffers(intel, size);
- base = 0;
- }
-
- delta = base - intel->upload.offset;
- if (intel->upload.buffer_len &&
- intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
- {
- drm_intel_bo_subdata(intel->upload.bo,
- intel->upload.buffer_offset,
- intel->upload.buffer_len,
- intel->upload.buffer);
- intel->upload.buffer_len = 0;
- }
-
- if (size <= sizeof(intel->upload.buffer)) {
- if (intel->upload.buffer_len == 0)
- intel->upload.buffer_offset = base;
- else
- intel->upload.buffer_len += delta;
-
- ptr = intel->upload.buffer + intel->upload.buffer_len;
- intel->upload.buffer_len += size;
- } else
- ptr = malloc(size);
-
- return ptr;
-}
-
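-/* Commit data written through intel_upload_map(): large (malloc'ed)
- * uploads are copied into the upload BO immediately and the temporary
- * block is freed, while small staged writes are left in
- * intel->upload.buffer to be flushed later.  A reference to the upload
- * BO and the offset of the data are returned through return_bo and
- * return_offset, and the upload offset is advanced past the data.
- */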
-void intel_upload_unmap(struct intel_context *intel,
- const void *ptr, GLuint size, GLuint align,
- drm_intel_bo **return_bo,
- GLuint *return_offset)
-{
- GLuint base;
-
- base = (intel->upload.offset + align - 1) / align * align;
- if (size > sizeof(intel->upload.buffer)) {
- drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
- free((void*)ptr);
- }
-
- drm_intel_bo_reference(intel->upload.bo);
- *return_bo = intel->upload.bo;
- *return_offset = base;
-
- intel->upload.offset = base + size;
-}
-
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
struct intel_buffer_object *intel_obj,
mask, filter);
}
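-/* Migrate the texture image backing this renderbuffer into a freshly
- * allocated miptree, letting the driver choose a new layout/tiling.
- * The old contents are copied over unless "invalidate" says they can
- * be discarded, and the renderbuffer is then pointed at the image's
- * new miptree.
- */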
-void
-intel_renderbuffer_move_to_temp(struct intel_context *intel,
- struct intel_renderbuffer *irb,
- bool invalidate)
-{
- struct gl_renderbuffer *rb = &irb->Base.Base;
- struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
- struct intel_mipmap_tree *new_mt;
- int width, height, depth;
-
- intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
-
- new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
- intel_image->base.Base.TexFormat,
- intel_image->base.Base.Level,
- intel_image->base.Base.Level,
- width, height, depth,
- true,
- INTEL_MIPTREE_TILING_ANY);
-
- intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
-
- intel_miptree_reference(&irb->mt, intel_image->mt);
- intel_renderbuffer_set_draw_offset(irb);
- intel_miptree_release(&new_mt);
-}
-
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.