/* r300_emit: Functions for emitting state. */
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_math.h"
-#include "util/u_mm.h"
#include "r300_context.h"
#include "r300_cb.h"
struct r300_blend_state* blend = (struct r300_blend_state*)state;
struct pipe_framebuffer_state* fb =
(struct pipe_framebuffer_state*)r300->fb_state.state;
+ struct pipe_surface *cb;
CS_LOCALS(r300);
- if (fb->nr_cbufs) {
- if (fb->cbufs[0]->format == PIPE_FORMAT_R16G16B16A16_FLOAT) {
+ cb = fb->nr_cbufs ? r300_get_nonnull_cb(fb, 0) : NULL;
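+ /* cbufs[0] may be NULL even when nr_cbufs is non-zero; r300_get_nonnull_cb
+ * is expected to return a usable surface in that case. */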
+
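+ /* FP16 colorbuffers must not be clamped; X16F has no alpha channel,
+ * hence the no-alpha table. */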
+ if (cb) {
+ if (cb->format == PIPE_FORMAT_R16G16B16A16_FLOAT) {
WRITE_CS_TABLE(blend->cb_noclamp, size);
+ } else if (cb->format == PIPE_FORMAT_R16G16B16X16_FLOAT) {
+ WRITE_CS_TABLE(blend->cb_noclamp_noalpha, size);
} else {
- unsigned swz = r300_surface(fb->cbufs[0])->colormask_swizzle;
+ unsigned swz = r300_surface(cb)->colormask_swizzle;
WRITE_CS_TABLE(blend->cb_clamp[swz], size);
}
} else {
struct r300_dsa_state* dsa = (struct r300_dsa_state*)state;
struct pipe_framebuffer_state* fb =
(struct pipe_framebuffer_state*)r300->fb_state.state;
- boolean is_r500 = r300->screen->caps.is_r500;
+ bool is_r500 = r300->screen->caps.is_r500;
CS_LOCALS(r300);
uint32_t alpha_func = dsa->alpha_function;
/* Choose the alpha ref value between 8-bit (FG_ALPHA_FUNC.AM_VAL) and
* 16-bit (FG_ALPHA_VALUE). */
if (is_r500 && (alpha_func & R300_FG_ALPHA_FUNC_ENABLE)) {
- if (fb->nr_cbufs && fb->cbufs[0]->format == PIPE_FORMAT_R16G16B16A16_FLOAT) {
+ struct pipe_surface *cb = fb->nr_cbufs ? r300_get_nonnull_cb(fb, 0) : NULL;
+
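+ /* An FP16 colorbuffer implies the 16-bit alpha reference (FG_ALPHA_VALUE). */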
+ if (cb &&
+ (cb->format == PIPE_FORMAT_R16G16B16A16_FLOAT ||
+ cb->format == PIPE_FORMAT_R16G16B16X16_FLOAT)) {
alpha_func |= R500_FG_ALPHA_FUNC_FP16_ENABLE;
} else {
alpha_func |= R500_FG_ALPHA_FUNC_8BIT;
case RC_STATE_R300_TEXSCALE_FACTOR:
tex = r300_resource(texstate->sampler_views[constant->u.State[1]]->base.texture);
/* Add a small number to the texture size to work around rounding errors in hw. */
- vec[0] = tex->b.b.width0 / (tex->tex.width0 + 0.001f);
- vec[1] = tex->b.b.height0 / (tex->tex.height0 + 0.001f);
- vec[2] = tex->b.b.depth0 / (tex->tex.depth0 + 0.001f);
+ vec[0] = tex->b.width0 / (tex->tex.width0 + 0.001f);
+ vec[1] = tex->b.height0 / (tex->tex.height0 + 0.001f);
+ vec[2] = tex->b.depth0 / (tex->tex.depth0 + 0.001f);
vec[3] = 1;
break;
BEGIN_CS(size);
- /* NUM_MULTIWRITES replicates COLOR[0] to all colorbuffers, which is not
- * what we usually want. */
if (r300->screen->caps.is_r500) {
rb3d_cctl = R300_RB3D_CCTL_INDEPENDENT_COLORFORMAT_ENABLE_ENABLE;
}
+ /* NUM_MULTIWRITES replicates COLOR[0] to all colorbuffers. */
if (fb->nr_cbufs && r300->fb_multiwrite) {
rb3d_cctl |= R300_RB3D_CCTL_NUM_MULTIWRITES(fb->nr_cbufs);
}
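+ /* CMASK enables AA colorbuffer compression and fast color clears. */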
+ if (r300->cmask_in_use) {
+ rb3d_cctl |= R300_RB3D_CCTL_AA_COMPRESSION_ENABLE |
+ R300_RB3D_CCTL_CMASK_ENABLE;
+ }
OUT_CS_REG(R300_RB3D_CCTL, rb3d_cctl);
/* Set up colorbuffers. */
for (i = 0; i < fb->nr_cbufs; i++) {
- surf = r300_surface(fb->cbufs[i]);
+ surf = r300_surface(r300_get_nonnull_cb(fb, i));
OUT_CS_REG(R300_RB3D_COLOROFFSET0 + (4 * i), surf->offset);
OUT_CS_RELOC(surf);
OUT_CS_REG(R300_RB3D_COLORPITCH0 + (4 * i), surf->pitch);
OUT_CS_RELOC(surf);
+
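+ /* Only the first colorbuffer carries CMASK state. */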
+ if (r300->cmask_in_use && i == 0) {
+ OUT_CS_REG(R300_RB3D_CMASK_OFFSET0, 0);
+ OUT_CS_REG(R300_RB3D_CMASK_PITCH0, surf->pitch_cmask);
+ OUT_CS_REG(R300_RB3D_COLOR_CLEAR_VALUE, r300->color_clear_value);
+ if (r300->screen->caps.is_r500) {
+ OUT_CS_REG_SEQ(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
+ OUT_CS(r300->color_clear_value_ar);
+ OUT_CS(r300->color_clear_value_gb);
+ }
+ }
}
/* Set up the ZB part of the CBZB clear. */
struct pipe_framebuffer_state* fb =
(struct pipe_framebuffer_state*)r300->fb_state.state;
- unsigned i, num_samples, num_cbufs = fb->nr_cbufs;
+ unsigned i, num_cbufs = fb->nr_cbufs;
unsigned mspos0, mspos1;
CS_LOCALS(r300);
/* Colorbuffer format in the US block.
* (must be written after unpipelined regs) */
OUT_CS_REG_SEQ(R300_US_OUT_FMT_0, 4);
for (i = 0; i < num_cbufs; i++) {
- OUT_CS(r300_surface(fb->cbufs[i])->format);
+ OUT_CS(r300_surface(r300_get_nonnull_cb(fb, i))->format);
}
for (; i < 1; i++) {
OUT_CS(R300_US_OUT_FMT_C4_8 |
R300_C0_SEL_B | R300_C1_SEL_G |
R300_C2_SEL_R | R300_C3_SEL_A);
}
for (; i < 4; i++) {
OUT_CS(R300_US_OUT_FMT_UNUSED);
}
- /* Multisampling. Depends on framebuffer sample count.
- * These are pipelined regs and as such cannot be moved
- * to the AA state.
+ /* Set the sample positions. They depend on the framebuffer sample count.
+ * These are pipelined regs and as such cannot be moved to the AA state.
*/
- num_samples = r300->msaa_enable ? r300->num_samples : 1;
-
- /* Sample positions. */
- switch (num_samples) {
+ switch (r300->num_samples) {
default:
mspos0 = r300_get_mspos(0, sample_locs_1x);
mspos1 = r300_get_mspos(1, sample_locs_1x);
}
OUT_CS_REG(R300_ZB_ZPASS_DATA, 0);
END_CS;
- query->begin_emitted = TRUE;
+ query->begin_emitted = true;
}
static void r300_emit_query_end_frag_pipes(struct r300_context *r300,
OUT_CS_REG(R300_SU_REG_DEST, 1 << 3);
OUT_CS_REG(R300_ZB_ZPASS_ADDR, (query->num_results + 3) * 4);
OUT_CS_RELOC(r300->query_current);
+ FALLTHROUGH;
case 3:
/* pipe 2 only */
OUT_CS_REG(R300_SU_REG_DEST, 1 << 2);
OUT_CS_REG(R300_ZB_ZPASS_ADDR, (query->num_results + 2) * 4);
OUT_CS_RELOC(r300->query_current);
+ FALLTHROUGH;
case 2:
/* pipe 1 only */
- /* As mentioned above, accomodate RV380 and older. */
+ /* As mentioned above, accommodate RV380 and older. */
OUT_CS_REG(R300_SU_REG_DEST,
1 << (caps->high_second_pipe ? 3 : 1));
OUT_CS_REG(R300_ZB_ZPASS_ADDR, (query->num_results + 1) * 4);
OUT_CS_RELOC(r300->query_current);
+ FALLTHROUGH;
case 1:
/* pipe 0 only */
OUT_CS_REG(R300_SU_REG_DEST, 1 << 0);
if (!query)
return;
- if (query->begin_emitted == FALSE)
+ if (!query->begin_emitted)
return;
if (caps->family == CHIP_RV530) {
} else
r300_emit_query_end_frag_pipes(r300, query);
- query->begin_emitted = FALSE;
+ query->begin_emitted = false;
query->num_results += query->num_pipes;
/* XXX grab all the results and reset the counter. */
struct r300_texture_sampler_state *texstate;
struct r300_resource *tex;
unsigned i;
- boolean has_us_format = r300->screen->caps.has_us_format;
+ bool has_us_format = r300->screen->caps.has_us_format;
CS_LOCALS(r300);
BEGIN_CS(size);
}
void r300_emit_vertex_arrays(struct r300_context* r300, int offset,
- boolean indexed, int instance_id)
+ bool indexed, int instance_id)
{
struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
struct pipe_vertex_element *velem = r300->velems->velem;
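+ /* Strides are taken from the vertex elements (src_stride), not from
+ * pipe_vertex_buffer. */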
size1 = hw_format_size[i];
size2 = hw_format_size[i+1];
- OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(vb1->stride) |
- R300_VBPNTR_SIZE1(size2) | R300_VBPNTR_STRIDE1(vb2->stride));
- OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride);
- OUT_CS(vb2->buffer_offset + velem[i+1].src_offset + offset * vb2->stride);
+ OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(velem[i].src_stride) |
+ R300_VBPNTR_SIZE1(size2) | R300_VBPNTR_STRIDE1(velem[i+1].src_stride));
+ OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * velem[i].src_stride);
+ OUT_CS(vb2->buffer_offset + velem[i+1].src_offset + offset * velem[i+1].src_stride);
}
if (vertex_array_count & 1) {
vb1 = &vbuf[velem[i].vertex_buffer_index];
size1 = hw_format_size[i];
- OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(vb1->stride));
- OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride);
+ OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(velem[i].src_stride));
+ OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * velem[i].src_stride);
}
for (i = 0; i < vertex_array_count; i++) {
- buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
+ buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer.resource);
OUT_CS_RELOC(buf);
}
} else {
if (velem[i].instance_divisor) {
stride1 = 0;
offset1 = vb1->buffer_offset + velem[i].src_offset +
- (instance_id / velem[i].instance_divisor) * vb1->stride;
+ (instance_id / velem[i].instance_divisor) * velem[i].src_stride;
} else {
- stride1 = vb1->stride;
- offset1 = vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride;
+ stride1 = velem[i].src_stride;
+ offset1 = vb1->buffer_offset + velem[i].src_offset + offset * velem[i].src_stride;
}
if (velem[i+1].instance_divisor) {
stride2 = 0;
offset2 = vb2->buffer_offset + velem[i+1].src_offset +
- (instance_id / velem[i+1].instance_divisor) * vb2->stride;
+ (instance_id / velem[i+1].instance_divisor) * velem[i+1].src_stride;
} else {
- stride2 = vb2->stride;
- offset2 = vb2->buffer_offset + velem[i+1].src_offset + offset * vb2->stride;
+ stride2 = velem[i+1].src_stride;
+ offset2 = vb2->buffer_offset + velem[i+1].src_offset + offset * velem[i+1].src_stride;
}
OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(stride1) |
if (velem[i].instance_divisor) {
stride1 = 0;
offset1 = vb1->buffer_offset + velem[i].src_offset +
- (instance_id / velem[i].instance_divisor) * vb1->stride;
+ (instance_id / velem[i].instance_divisor) * velem[i].src_stride;
} else {
- stride1 = vb1->stride;
- offset1 = vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride;
+ stride1 = velem[i].src_stride;
+ offset1 = vb1->buffer_offset + velem[i].src_offset + offset * velem[i].src_stride;
}
OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(stride1));
}
for (i = 0; i < vertex_array_count; i++) {
- buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
+ buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer.resource);
OUT_CS_RELOC(buf);
}
}
END_CS;
}
-void r300_emit_vertex_arrays_swtcl(struct r300_context *r300, boolean indexed)
+void r300_emit_vertex_arrays_swtcl(struct r300_context *r300, bool indexed)
{
CS_LOCALS(r300);
OUT_CS(r300->draw_vbo_offset);
OUT_CS(0);
- assert(r300->vbo_cs);
- cs_winsys->cs_write_reloc(cs_copy, r300->vbo_cs);
- CS_USED_DW(2);
+ assert(r300->vbo);
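+ /* Emit the relocation inline: a NOP packet whose payload points at the
+ * buffer's entry in the relocation list (cs_lookup_buffer index * 4). */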
+ OUT_CS(0xc0001000); /* PKT3_NOP */
+ OUT_CS(r300->rws->cs_lookup_buffer(&r300->cs, r300->vbo) * 4);
END_CS;
}
void r300_emit_vs_state(struct r300_context* r300, unsigned size, void* state)
{
- struct r300_vertex_shader* vs = (struct r300_vertex_shader*)state;
+ struct r300_vertex_shader_code* vs = ((struct r300_vertex_shader*)state)->shader;
struct r300_vertex_program_code* code = &vs->code;
struct r300_screen* r300screen = r300->screen;
unsigned instruction_count = code->length / 4;
* R300_VAP_PVS_CODE_CNTL_1
* See the r5xx docs for instructions on how to use these. */
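+ /* last_pos_write is the last instruction writing the position output;
+ * last_input_read is the last instruction reading a vertex input. */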
OUT_CS_REG(R300_VAP_PVS_CODE_CNTL_0, R300_PVS_FIRST_INST(0) |
- R300_PVS_XYZW_VALID_INST(instruction_count - 1) |
+ R300_PVS_XYZW_VALID_INST(code->last_pos_write) |
R300_PVS_LAST_INST(instruction_count - 1));
- OUT_CS_REG(R300_VAP_PVS_CODE_CNTL_1, instruction_count - 1);
+ OUT_CS_REG(R300_VAP_PVS_CODE_CNTL_1, code->last_input_read);
OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG, 0);
OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->length);
R300_PVS_NUM_CNTLRS(pvs_num_controllers) |
R300_PVS_NUM_FPUS(r300screen->caps.num_vert_fpus) |
R300_PVS_VF_MAX_VTX_NUM(12) |
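+ /* clip_halfz selects the DX-style [0, 1] depth clip space. */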
+ (r300->clip_halfz ? R300_DX_CLIP_SPACE_DEF : 0) |
(r300screen->caps.is_r500 ? R500_TCL_STATE_OPTIMIZATION : 0));
/* Emit flow control instructions. Even if there are no fc instructions,
void r300_emit_vs_constants(struct r300_context* r300,
unsigned size, void *state)
{
- unsigned count =
- ((struct r300_vertex_shader*)r300->vs_state.state)->externals_count;
+ unsigned count = r300_vs(r300)->shader->externals_count;
struct r300_constant_buffer *buf = (struct r300_constant_buffer*)state;
- struct r300_vertex_shader *vs = (struct r300_vertex_shader*)r300->vs_state.state;
+ struct r300_vertex_shader_code *vs = r300_vs(r300)->shader;
unsigned i;
int imm_first = vs->externals_count;
int imm_end = vs->code.constants.Count;
tex = r300_resource(fb->zsbuf->texture);
BEGIN_CS(size);
- OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
- R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
- R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
OUT_CS_PKT3(R300_PACKET3_3D_CLEAR_HIZ, 2);
OUT_CS(0);
OUT_CS(tex->tex.hiz_dwords[fb->zsbuf->u.tex.level]);
END_CS;
/* Mark the current zbuffer's hiz ram as in use. */
- r300->hiz_in_use = TRUE;
+ r300->hiz_in_use = true;
r300->hiz_func = HIZ_FUNC_NONE;
r300_mark_atom_dirty(r300, &r300->hyperz_state);
}
tex = r300_resource(fb->zsbuf->texture);
BEGIN_CS(size);
- OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
- R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
- R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
OUT_CS_PKT3(R300_PACKET3_3D_CLEAR_ZMASK, 2);
OUT_CS(0);
OUT_CS(tex->tex.zmask_dwords[fb->zsbuf->u.tex.level]);
END_CS;
/* Mark the current zbuffer's zmask as in use. */
- r300->zmask_in_use = TRUE;
+ r300->zmask_in_use = true;
r300_mark_atom_dirty(r300, &r300->hyperz_state);
}
+void r300_emit_cmask_clear(struct r300_context *r300, unsigned size, void *state)
+{
+ struct pipe_framebuffer_state *fb =
+ (struct pipe_framebuffer_state*)r300->fb_state.state;
+ struct r300_resource *tex;
+ CS_LOCALS(r300);
+
+ tex = r300_resource(fb->cbufs[0]->texture);
+
+ BEGIN_CS(size);
+ OUT_CS_PKT3(R300_PACKET3_3D_CLEAR_CMASK, 2);
+ OUT_CS(0);
+ OUT_CS(tex->tex.cmask_dwords);
+ OUT_CS(0);
+ END_CS;
+
+ /* Mark the current colorbuffer's cmask as in use. */
+ r300->cmask_in_use = true;
+ r300_mark_fb_state_dirty(r300, R300_CHANGED_CMASK_ENABLE);
+}
+
void r300_emit_ztop_state(struct r300_context* r300,
unsigned size, void* state)
{
END_CS;
}
-boolean r300_emit_buffer_validate(struct r300_context *r300,
- boolean do_validate_vertex_buffers,
- struct pipe_resource *index_buffer)
+bool r300_emit_buffer_validate(struct r300_context *r300,
+ bool do_validate_vertex_buffers,
+ struct pipe_resource *index_buffer)
{
struct pipe_framebuffer_state *fb =
(struct pipe_framebuffer_state*)r300->fb_state.state;
(struct r300_textures_state*)r300->textures_state.state;
struct r300_resource *tex;
unsigned i;
- boolean flushed = FALSE;
+ bool flushed = false;
validate:
if (r300->fb_state.dirty) {
/* Color buffers... */
for (i = 0; i < fb->nr_cbufs; i++) {
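+ /* Unbound colorbuffer slots have nothing to validate. */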
+ if (!fb->cbufs[i])
+ continue;
tex = r300_resource(fb->cbufs[i]->texture);
assert(tex && tex->buf && "cbuf is marked, but NULL!");
- r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
- RADEON_USAGE_READWRITE,
+ r300->rws->cs_add_buffer(&r300->cs, tex->buf,
+ RADEON_USAGE_READWRITE | RADEON_USAGE_SYNCHRONIZED |
+ (tex->b.nr_samples > 1 ?
+ RADEON_PRIO_COLOR_BUFFER_MSAA :
+ RADEON_PRIO_COLOR_BUFFER),
r300_surface(fb->cbufs[i])->domain);
}
/* ...depth buffer... */
if (fb->zsbuf) {
tex = r300_resource(fb->zsbuf->texture);
assert(tex && tex->buf && "zsbuf is marked, but NULL!");
- r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
- RADEON_USAGE_READWRITE,
+ r300->rws->cs_add_buffer(&r300->cs, tex->buf,
+ RADEON_USAGE_READWRITE | RADEON_USAGE_SYNCHRONIZED |
+ (tex->b.nr_samples > 1 ?
+ RADEON_PRIO_DEPTH_BUFFER_MSAA :
+ RADEON_PRIO_DEPTH_BUFFER),
r300_surface(fb->zsbuf)->domain);
}
}
/* The AA resolve buffer. */
if (r300->aa_state.dirty) {
if (aa->dest) {
- r300->rws->cs_add_reloc(r300->cs, aa->dest->cs_buf,
- RADEON_USAGE_WRITE,
+ r300->rws->cs_add_buffer(&r300->cs, aa->dest->buf,
+ RADEON_USAGE_WRITE | RADEON_USAGE_SYNCHRONIZED |
+ RADEON_PRIO_COLOR_BUFFER,
aa->dest->domain);
}
}
if (r300->textures_state.dirty) {
/* ...textures... */
for (i = 0; i < texstate->count; i++) {
- if (!(texstate->tx_enable & (1 << i))) {
+ if (!(texstate->tx_enable & (1U << i))) {
continue;
}
tex = r300_resource(texstate->sampler_views[i]->base.texture);
- r300->rws->cs_add_reloc(r300->cs, tex->cs_buf, RADEON_USAGE_READ,
+ r300->rws->cs_add_buffer(&r300->cs, tex->buf,
+ RADEON_USAGE_READ | RADEON_USAGE_SYNCHRONIZED |
+ RADEON_PRIO_SAMPLER_TEXTURE,
tex->domain);
}
}
/* ...occlusion query buffer... */
if (r300->query_current)
- r300->rws->cs_add_reloc(r300->cs, r300->query_current->cs_buf,
- RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
+ r300->rws->cs_add_buffer(&r300->cs, r300->query_current->buf,
+ RADEON_USAGE_WRITE | RADEON_USAGE_SYNCHRONIZED |
+ RADEON_PRIO_QUERY,
+ RADEON_DOMAIN_GTT);
/* ...vertex buffer for SWTCL path... */
- if (r300->vbo_cs)
- r300->rws->cs_add_reloc(r300->cs, r300->vbo_cs,
- RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
+ if (r300->vbo)
+ r300->rws->cs_add_buffer(&r300->cs, r300->vbo,
+ RADEON_USAGE_READ | RADEON_USAGE_SYNCHRONIZED |
+ RADEON_PRIO_VERTEX_BUFFER,
+ RADEON_DOMAIN_GTT);
/* ...vertex buffers for HWTCL path... */
if (do_validate_vertex_buffers && r300->vertex_arrays_dirty) {
struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
struct pipe_resource *buf;
for (; vbuf != last; vbuf++) {
- buf = vbuf->buffer;
+ buf = vbuf->buffer.resource;
if (!buf)
continue;
- r300->rws->cs_add_reloc(r300->cs, r300_resource(buf)->cs_buf,
- RADEON_USAGE_READ,
+ r300->rws->cs_add_buffer(&r300->cs, r300_resource(buf)->buf,
+ RADEON_USAGE_READ | RADEON_USAGE_SYNCHRONIZED |
+ RADEON_PRIO_SAMPLER_BUFFER,
r300_resource(buf)->domain);
}
}
/* ...and index buffer for HWTCL path. */
if (index_buffer)
- r300->rws->cs_add_reloc(r300->cs, r300_resource(index_buffer)->cs_buf,
- RADEON_USAGE_READ,
+ r300->rws->cs_add_buffer(&r300->cs, r300_resource(index_buffer)->buf,
+ RADEON_USAGE_READ | RADEON_USAGE_SYNCHRONIZED |
+ RADEON_PRIO_INDEX_BUFFER,
r300_resource(index_buffer)->domain);
/* Now do the validation (flush is called inside cs_validate on failure). */
- if (!r300->rws->cs_validate(r300->cs)) {
+ if (!r300->rws->cs_validate(&r300->cs)) {
/* Oops, an infinite loop; give up. */
if (flushed)
- return FALSE;
+ return false;
- flushed = TRUE;
+ flushed = true;
goto validate;
}
- return TRUE;
+ return true;
}
unsigned r300_get_num_dirty_dwords(struct r300_context *r300)
dwords += r300->hyperz_state.size + 2; /* emit_hyperz_end + zcache flush */
if (r300->screen->caps.is_r500)
dwords += 2; /* emit_index_bias */
- if (r300->screen->info.drm_minor >= 6)
- dwords += 3; /* MSPOS */
+ dwords += 3; /* MSPOS */
return dwords;
}
foreach_dirty_atom(r300, atom) {
if (atom->dirty) {
atom->emit(r300, atom->size, atom->state);
- atom->dirty = FALSE;
+ atom->dirty = false;
}
}