Tizen 2.1 base
[sdk/emulator/qemu.git] / gl / mesa / src / mesa / drivers / dri / intel / intel_batchbuffer.h
1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
3
4 #include "main/mtypes.h"
5
6 #include "intel_context.h"
7 #include "intel_bufmgr.h"
8 #include "intel_reg.h"
9
10 #define BATCH_RESERVED 16
11
/* Batch buffer lifecycle: create/teardown of the context's batch, reset
 * between flushes, and save/restore of the current emission state
 * (reset_to_saved rewinds to the point recorded by save_state). */
void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_reset(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
17
/* Submit the accumulated batch to the kernel.  Do not call directly:
 * use the intel_batchbuffer_flush() macro below, which passes the C
 * source location of the caller for debugging. */
int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
        _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
23
24
25
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);

/* Emit a relocation entry for `buffer` at the current batch position;
 * returns false on failure.  The fenced variant additionally requests a
 * fence register for the target (pre-gen4 tiled-buffer requirement —
 * TODO(review): confirm against intel_batchbuffer.c). */
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                       drm_intel_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                              drm_intel_bo *buffer,
                                              uint32_t read_domains,
                                              uint32_t write_domain,
                                              uint32_t offset);
/* Pipeline flush / workaround packet emitters (see intel_batchbuffer.c). */
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
void gen7_emit_vs_workaround_flush(struct intel_context *intel);
47
48 static INLINE uint32_t float_as_int(float f)
49 {
50    union {
51       float f;
52       uint32_t d;
53    } fi;
54
55    fi.f = f;
56    return fi.d;
57 }
58
59 /* Inline functions - might actually be better off with these
60  * non-inlined.  Certainly better off switching all command packets to
61  * be passed as structs rather than dwords, but that's a little bit of
62  * work...
63  */
64 static INLINE unsigned
65 intel_batchbuffer_space(struct intel_context *intel)
66 {
67    return (intel->batch.state_batch_offset - intel->batch.reserved_space)
68       - intel->batch.used*4;
69 }
70
71
72 static INLINE void
73 intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
74 {
75 #ifdef DEBUG
76    assert(intel_batchbuffer_space(intel) >= 4);
77 #endif
78    intel->batch.map[intel->batch.used++] = dword;
79 }
80
/* Append one float to the batch by bit-casting it to a dword. */
static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}
86
87 static INLINE void
88 intel_batchbuffer_require_space(struct intel_context *intel,
89                                 GLuint sz, int is_blit)
90 {
91
92    if (intel->gen >= 6 &&
93        intel->batch.is_blit != is_blit && intel->batch.used) {
94       intel_batchbuffer_flush(intel);
95    }
96
97    intel->batch.is_blit = is_blit;
98
99 #ifdef DEBUG
100    assert(sz < sizeof(intel->batch.map) - BATCH_RESERVED);
101 #endif
102    if (intel_batchbuffer_space(intel) < sz)
103       intel_batchbuffer_flush(intel);
104 }
105
106 static INLINE void
107 intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
108 {
109    intel_batchbuffer_require_space(intel, n * 4, is_blit);
110
111    intel->batch.emit = intel->batch.used;
112 #ifdef DEBUG
113    intel->batch.total = n;
114 #endif
115 }
116
117 static INLINE void
118 intel_batchbuffer_advance(struct intel_context *intel)
119 {
120 #ifdef DEBUG
121    struct intel_batchbuffer *batch = &intel->batch;
122    unsigned int _n = batch->used - batch->emit;
123    assert(batch->total != 0);
124    if (_n != batch->total) {
125       fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
126               _n, batch->total);
127       abort();
128    }
129    batch->total = 0;
130 #endif
131 }
132
void intel_batchbuffer_cached_advance(struct intel_context *intel);

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

/* Open an n-dword packet on the render (BEGIN_BATCH) or blit
 * (BEGIN_BATCH_BLT) ring.  All of these expect a local `intel`
 * context pointer in scope. */
#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
/* Emit a dword that the kernel will relocate to point at `buf`. */
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   intel_batchbuffer_emit_reloc(intel, buf,                     \
                                read_domains, write_domain, delta);     \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {   \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,              \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
154
155 #endif