#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"
#define BATCH_RESERVED 16
void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_reset(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);
#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);
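/* A minimal usage sketch: copy a couple of prepared dwords into the
 * batch in one call (MI_NOOP is assumed to be available, e.g. from
 * intel_reg.h):
 *
 *    uint32_t payload[2] = { MI_NOOP, MI_NOOP };
 *    intel_batchbuffer_data(intel, payload, sizeof(payload), false);
 */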
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t delta);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t delta);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
void gen7_emit_vs_workaround_flush(struct intel_context *intel);
static INLINE uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}
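/* The union avoids the strict-aliasing pitfalls of casting a float *
 * to a uint32_t *.  As a worked example, float_as_int(1.0f) yields
 * the IEEE 754 bit pattern 0x3f800000, which is what OUT_BATCH_F()
 * ends up writing into the batch.
 */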
/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.state_batch_offset - intel->batch.reserved_space)
      - intel->batch.used*4;
}
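/* Illustrative arithmetic (the sizes here are assumptions, not the
 * real defaults): with state_batch_offset == 4096, reserved_space ==
 * BATCH_RESERVED (16) and 100 dwords already used, the space left is
 * 4096 - 16 - 100*4 = 3680 bytes.  Commands fill the buffer upward
 * while indirect state is allocated downward from state_batch_offset,
 * so the gap between the two is what remains.
 */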
static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(intel) >= 4);
#endif
   intel->batch.map[intel->batch.used++] = dword;
}
static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}
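/* On gen6+ the render and blit commands execute on different hardware
 * rings, and a batch is only ever submitted to one of them, so
 * switching between render and blit work forces the current batch to
 * be flushed first.
 */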
static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
{
   if (intel->gen >= 6 &&
       intel->batch.is_blit != is_blit && intel->batch.used) {
      intel_batchbuffer_flush(intel);
   }

   intel->batch.is_blit = is_blit;

#ifdef DEBUG
   assert(sz < sizeof(intel->batch.map) - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}
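/* intel_batchbuffer_begin() records where the current packet starts
 * (batch.emit) and, in debug builds, how many dwords it is expected
 * to contain (batch.total); intel_batchbuffer_advance() then checks
 * that exactly that many dwords were emitted.
 */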
static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
{
   intel_batchbuffer_require_space(intel, n * 4, is_blit);

   intel->batch.emit = intel->batch.used;
#ifdef DEBUG
   intel->batch.total = n;
#endif
}
static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}
void intel_batchbuffer_cached_advance(struct intel_context *intel);
/* Here are the crusty old macros, to be removed:
 */
#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
   intel_batchbuffer_emit_reloc(intel, buf,                            \
                                read_domains, write_domain, delta);    \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                     \
                                       read_domains, write_domain, delta); \
} while (0)
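/* A minimal emission sketch using these macros and ADVANCE_BATCH()
 * below (MI_FLUSH and MI_NOOP are assumed to be available, e.g. from
 * intel_reg.h):
 *
 *    BEGIN_BATCH(2);
 *    OUT_BATCH(MI_FLUSH);
 *    OUT_BATCH(MI_NOOP);
 *    ADVANCE_BATCH();
 */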
#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);

#endif /* INTEL_BATCHBUFFER_H */