2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include "../../winsys/radeon/drm/radeon_winsys.h"
30 #include "util/u_double_list.h"
31 #include "util/u_transfer.h"
/* Print an error message to stderr, prefixed with "EE" and the source
 * location (file:line function) of the call site.  Uses the GNU named
 * variadic-macro extension; ##args swallows the comma when no varargs
 * are given. */
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
75 struct r600_tiling_info {
76 unsigned num_channels;
81 struct r600_resource {
85 struct pb_buffer *buf;
86 struct radeon_winsys_cs_handle *cs_buf;
/* Upper bounds for per-block state: they size the reloc[], pm4[] and
 * pm4_bo_index[] arrays in struct r600_block and the regs[] array in
 * struct r600_pipe_state. */
#define R600_BLOCK_MAX_BO 32
#define R600_BLOCK_MAX_REG 128
95 /* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
96 /* there is a block entry for each register so 512 blocks */
97 /* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
98 /* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound*/
/* Lowest register offset tracked by the range hash; there are no
 * registers to read/write below 0x8000 (0x2000 in dword space). */
#define RANGE_OFFSET_START 0x8000
/* Number of register ranges covering [RANGE_OFFSET_START, 0x42000).
 * Fully parenthesized so the macro behaves as a single value inside any
 * enclosing expression (e.g. x % NUM_RANGES, x / NUM_RANGES). */
#define NUM_RANGES ((0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT)) /* 128 << 9 = 64k */
/* Map a register offset to its hash range index (0..255) and to the
 * block index within that range.  The offset argument is parenthesized
 * so compound expressions (a + b, cond ? x : y) expand correctly. */
#define CTX_RANGE_ID(offset) (((((offset) - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
#define CTX_BLOCK_ID(offset) ((((offset) - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
106 struct r600_pipe_reg {
108 struct r600_block *block;
109 struct r600_resource *bo;
110 enum radeon_bo_usage bo_usage;
114 struct r600_pipe_state {
117 struct r600_pipe_reg regs[R600_BLOCK_MAX_REG];
/* Bit flags for a block's status word. */
#define R600_BLOCK_STATUS_ENABLED (1 << 0)
#define R600_BLOCK_STATUS_DIRTY (1 << 1)
123 struct r600_block_reloc {
124 struct r600_resource *bo;
125 enum radeon_bo_usage bo_usage;
126 unsigned bo_pm4_index;
130 struct list_head list;
131 struct list_head enable_list;
134 unsigned start_offset;
135 unsigned pm4_ndwords;
140 uint32_t pm4[R600_BLOCK_MAX_REG];
141 unsigned pm4_bo_index[R600_BLOCK_MAX_REG];
142 struct r600_block_reloc reloc[R600_BLOCK_MAX_BO];
146 struct r600_block **blocks;
149 struct r600_query_buffer {
150 /* The buffer where query results are stored. */
151 struct r600_resource *buf;
152 /* Offset of the next free result after current query data */
153 unsigned results_end;
154 /* If a query buffer is full, a new buffer is created and the old one
155 * is put in here. When we calculate the result, we sum up the samples
156 * from all buffers. */
157 struct r600_query_buffer *previous;
161 /* The query buffer and how many results are in it. */
162 struct r600_query_buffer buffer;
163 /* The type of query */
165 /* Size of the result in memory for both begin_query and end_query,
166 * this can be one or two numbers, or it could even be a size of a structure. */
167 unsigned result_size;
168 /* The number of dwords for begin_query or end_query. */
170 /* linked list of queries */
171 struct list_head list;
174 struct r600_so_target {
175 struct pipe_stream_output_target b;
177 /* The buffer where BUFFER_FILLED_SIZE is stored. */
178 struct r600_resource *filled_size;
179 unsigned stride_in_dw;
/* Context-wide flag bits. */
#define R600_CONTEXT_DRAW_PENDING (1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY (1 << 1)
/*
 * Hardware-context entry points.  Prototypes only; the implementations
 * live in the driver's context source files (r600 and evergreen
 * variants are provided for the sampler-state setters and init).
 */
void r600_get_backend_mask(struct r600_context *ctx);
int r600_context_init(struct r600_context *ctx);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_emit(struct r600_context *ctx, struct r600_pipe_state *state, unsigned pkt_flags);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_flush(struct r600_context *ctx, unsigned flags);
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
			     unsigned offset, unsigned value);
void r600_inval_shader_cache(struct r600_context *ctx);
void r600_inval_texture_cache(struct r600_context *ctx);
void r600_inval_vertex_cache(struct r600_context *ctx);
void r600_flush_framebuffer(struct r600_context *ctx, bool flush_now);
void r600_context_streamout_begin(struct r600_context *ctx);
void r600_context_streamout_end(struct r600_context *ctx);
void r600_context_draw_opaque_count(struct r600_context *ctx, struct r600_so_target *t);
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block, unsigned pkt_flags);

/* Evergreen-specific variants of the context entry points. */
int evergreen_context_init(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
/*
 * Register-state helpers.  The _r600_* functions take explicit
 * range/block ids; the convenience macros below derive them from the
 * register offset via CTX_RANGE_ID()/CTX_BLOCK_ID().
 */
void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
				 struct r600_pipe_state *state,
				 uint32_t offset, uint32_t value,
				 uint32_t range_id, uint32_t block_id,
				 struct r600_resource *bo,
				 enum radeon_bo_usage usage);

void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      uint32_t offset, uint32_t value,
			      uint32_t range_id, uint32_t block_id);

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     uint32_t offset, uint32_t value,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage);

/* NOTE: these macros expand to a call that references a variable named
 * `rctx`, which must therefore be in scope at every call site. */
#define r600_pipe_state_add_reg_bo(state, offset, value, bo, usage) _r600_pipe_state_add_reg_bo(rctx, state, offset, value, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo, usage)
#define r600_pipe_state_add_reg(state, offset, value) _r600_pipe_state_add_reg(rctx, state, offset, value, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset))
235 static inline void r600_pipe_state_mod_reg(struct r600_pipe_state *state,
238 state->regs[state->nregs].value = value;