 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */
#include <stddef.h>
#include <string.h>
#include <strings.h>

#include "intel_fbo.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
40 /***********************************************************************
41 * WM unit - fragment programs and rasterization
45 brw_color_buffer_write_enabled(struct brw_context *brw)
47 struct gl_context *ctx = &brw->intel.ctx;
48 const struct gl_fragment_program *fp = brw->fragment_program;
52 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
53 struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
57 (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
58 fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
59 (ctx->Color.ColorMask[i][0] ||
60 ctx->Color.ColorMask[i][1] ||
61 ctx->Color.ColorMask[i][2] ||
62 ctx->Color.ColorMask[i][3])) {
71 * Setup wm hardware state. See page 225 of Volume 2
74 brw_prepare_wm_unit(struct brw_context *brw)
76 struct intel_context *intel = &brw->intel;
77 struct gl_context *ctx = &intel->ctx;
78 const struct gl_fragment_program *fp = brw->fragment_program;
79 struct brw_wm_unit_state *wm;
81 wm = brw_state_batch(brw, sizeof(*wm), 32, &brw->wm.state_offset);
82 memset(wm, 0, sizeof(*wm));
84 if (brw->wm.prog_data->prog_offset_16) {
85 /* These two fields should be the same pre-gen6, which is why we
86 * only have one hardware field to program for both dispatch
89 assert(brw->wm.prog_data->first_curbe_grf ==
90 brw->wm.prog_data->first_curbe_grf_16);
93 /* BRW_NEW_PROGRAM_CACHE | CACHE_NEW_WM_PROG */
94 wm->thread0.grf_reg_count = brw->wm.prog_data->reg_blocks;
95 wm->wm9.grf_reg_count_2 = brw->wm.prog_data->reg_blocks_16;
97 wm->thread0.kernel_start_pointer =
98 brw_program_reloc(brw,
99 brw->wm.state_offset +
100 offsetof(struct brw_wm_unit_state, thread0),
101 brw->wm.prog_offset +
102 (wm->thread0.grf_reg_count << 1)) >> 6;
104 wm->wm9.kernel_start_pointer_2 =
105 brw_program_reloc(brw,
106 brw->wm.state_offset +
107 offsetof(struct brw_wm_unit_state, wm9),
108 brw->wm.prog_offset +
109 brw->wm.prog_data->prog_offset_16 +
110 (wm->wm9.grf_reg_count_2 << 1)) >> 6;
112 wm->thread1.depth_coef_urb_read_offset = 1;
113 wm->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
116 wm->thread1.binding_table_entry_count = 0; /* hardware requirement */
118 /* BRW_NEW_NR_SURFACES */
119 wm->thread1.binding_table_entry_count = brw->wm.nr_surfaces;
122 if (brw->wm.prog_data->total_scratch != 0) {
123 wm->thread2.scratch_space_base_pointer =
124 brw->wm.scratch_bo->offset >> 10; /* reloc */
125 wm->thread2.per_thread_scratch_space =
126 ffs(brw->wm.prog_data->total_scratch) - 11;
128 wm->thread2.scratch_space_base_pointer = 0;
129 wm->thread2.per_thread_scratch_space = 0;
132 wm->thread3.dispatch_grf_start_reg = brw->wm.prog_data->first_curbe_grf;
133 wm->thread3.urb_entry_read_length = brw->wm.prog_data->urb_read_length;
134 wm->thread3.urb_entry_read_offset = 0;
135 wm->thread3.const_urb_entry_read_length =
136 brw->wm.prog_data->curb_read_length;
137 /* BRW_NEW_CURBE_OFFSETS */
138 wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2;
141 wm->wm4.sampler_count = 0; /* hardware requirement */
143 /* CACHE_NEW_SAMPLER */
144 wm->wm4.sampler_count = (brw->wm.sampler_count + 1) / 4;
147 if (brw->wm.sampler_count) {
149 wm->wm4.sampler_state_pointer = (intel->batch.bo->offset +
150 brw->wm.sampler_offset) >> 5;
152 wm->wm4.sampler_state_pointer = 0;
155 /* BRW_NEW_FRAGMENT_PROGRAM */
156 wm->wm5.program_uses_depth = (fp->Base.InputsRead &
157 (1 << FRAG_ATTRIB_WPOS)) != 0;
158 wm->wm5.program_computes_depth = (fp->Base.OutputsWritten &
159 BITFIELD64_BIT(FRAG_RESULT_DEPTH)) != 0;
161 * Override for NULL depthbuffer case, required by the Pixel Shader Computed
164 if (!intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH))
165 wm->wm5.program_computes_depth = 0;
168 wm->wm5.program_uses_killpixel = fp->UsesKill || ctx->Color.AlphaEnabled;
171 /* BRW_NEW_FRAGMENT_PROGRAM
173 * If using the fragment shader backend, the program is always
174 * 8-wide. If not, it's always 16.
176 if (ctx->Shader.CurrentFragmentProgram) {
177 struct brw_shader *shader = (struct brw_shader *)
178 ctx->Shader.CurrentFragmentProgram->_LinkedShaders[MESA_SHADER_FRAGMENT];
180 if (shader != NULL && shader->ir != NULL) {
181 wm->wm5.enable_8_pix = 1;
182 if (brw->wm.prog_data->prog_offset_16)
183 wm->wm5.enable_16_pix = 1;
186 if (!wm->wm5.enable_8_pix)
187 wm->wm5.enable_16_pix = 1;
189 wm->wm5.max_threads = brw->wm_max_threads - 1;
191 /* _NEW_BUFFERS | _NEW_COLOR */
192 if (brw_color_buffer_write_enabled(brw) ||
193 wm->wm5.program_uses_killpixel ||
194 wm->wm5.program_computes_depth) {
195 wm->wm5.thread_dispatch_enable = 1;
198 wm->wm5.legacy_line_rast = 0;
199 wm->wm5.legacy_global_depth_bias = 0;
200 wm->wm5.early_depth_test = 1; /* never need to disable */
201 wm->wm5.line_aa_region_width = 0;
202 wm->wm5.line_endcap_aa_region_width = 1;
204 /* _NEW_POLYGONSTIPPLE */
205 wm->wm5.polygon_stipple = ctx->Polygon.StippleFlag;
208 if (ctx->Polygon.OffsetFill) {
209 wm->wm5.depth_offset = 1;
210 /* Something wierd going on with legacy_global_depth_bias,
211 * offset_constant, scaling and MRD. This value passes glean
212 * but gives some odd results elsewere (eg. the
213 * quad-offset-units test).
215 wm->global_depth_offset_constant = ctx->Polygon.OffsetUnits * 2;
217 /* This is the only value that passes glean:
219 wm->global_depth_offset_scale = ctx->Polygon.OffsetFactor;
223 wm->wm5.line_stipple = ctx->Line.StippleFlag;
226 if (unlikely(INTEL_DEBUG & DEBUG_STATS) || intel->stats_wm)
227 wm->wm4.stats_enable = 1;
229 /* Emit scratch space relocation */
230 if (brw->wm.prog_data->total_scratch != 0) {
231 drm_intel_bo_emit_reloc(intel->batch.bo,
232 brw->wm.state_offset +
233 offsetof(struct brw_wm_unit_state, thread2),
235 wm->thread2.per_thread_scratch_space,
236 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
239 /* Emit sampler state relocation */
240 if (brw->wm.sampler_count != 0) {
241 drm_intel_bo_emit_reloc(intel->batch.bo,
242 brw->wm.state_offset +
243 offsetof(struct brw_wm_unit_state, wm4),
244 intel->batch.bo, (brw->wm.sampler_offset |
245 wm->wm4.stats_enable |
246 (wm->wm4.sampler_count << 2)),
247 I915_GEM_DOMAIN_INSTRUCTION, 0);
250 brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
253 const struct brw_tracked_state brw_wm_unit = {
255 .mesa = (_NEW_POLYGON |
256 _NEW_POLYGONSTIPPLE |
262 .brw = (BRW_NEW_BATCH |
263 BRW_NEW_PROGRAM_CACHE |
264 BRW_NEW_FRAGMENT_PROGRAM |
265 BRW_NEW_CURBE_OFFSETS |
266 BRW_NEW_NR_WM_SURFACES),
268 .cache = (CACHE_NEW_WM_PROG |
271 .prepare = brw_prepare_wm_unit,