* Note that we don't move constant-indexed accesses to arrays. No
* testing has been done of the performance impact of this choice.
*/
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
for (int i = 0 ; i < inst->sources; i++) {
if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
continue;
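
Every hunk in this patch is the same mechanical rewrite: foreach_list_safe iterates untyped exec_node pointers and forces a downcast in the loop body, while foreach_in_list_safe declares a correctly typed iteration variable itself, dropping the cast and the extra local. For reference, the typed macro expands to roughly the following; this is paraphrased from memory of Mesa's list.h, so consult the header for the authoritative definition:

   #define foreach_in_list_safe(__type, __node, __list)            \
      for (__type *__node = (__type *)(__list)->head,              \
                  *__next = (__type *)__node->next;                \
           __next != NULL;                                         \
           __node = __next, __next = (__type *)__next->next)

Both the old and new forms are "safe" in the same sense: the successor is captured before the body runs, so the body may remove the current node.
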
calculate_live_intervals();
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
int ip = next_ip;
next_ip++;
memset(last_mrf_move, 0, sizeof(last_mrf_move));
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->is_control_flow()) {
memset(last_mrf_move, 0, sizeof(last_mrf_move));
}
* have a .reg_offset of 0.
*/
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->mlen != 0 && inst->dst.file == GRF) {
insert_gen4_pre_send_dependency_workarounds(inst);
insert_gen4_post_send_dependency_workarounds(inst);
{
bool progress = false;
- foreach_list_safe(node, &instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
fs_reg dst = inst->dst;
{
bool flag_mov_found[2] = {false};
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->is_control_flow()) {
memset(flag_mov_found, 0, sizeof(flag_mov_found));
} else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
/* kill the destination from the ACP */
if (inst->dst.file == GRF) {
- foreach_list_safe(entry_node, &acp[inst->dst.reg % ACP_HASH_SIZE]) {
- acp_entry *entry = (acp_entry *)entry_node;
-
+ foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.reg % ACP_HASH_SIZE]) {
if (inst->overwrites_reg(entry->dst)) {
entry->remove();
}
* the source, so walk across the entire table.
*/
for (int i = 0; i < ACP_HASH_SIZE; i++) {
- foreach_list_safe(entry_node, &acp[i]) {
- acp_entry *entry = (acp_entry *)entry_node;
+ foreach_in_list_safe(acp_entry, entry, &acp[i]) {
if (inst->overwrites_reg(entry->src))
entry->remove();
}
}
}
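
The asymmetry between the two kill loops above follows from how the ACP is keyed: entries are filed in the bucket chosen by their destination register, which is the acp[... % ACP_HASH_SIZE] indexing visible in the first loop. A hypothetical helper makes the invariant explicit; acp_add is not a real Mesa function, just an illustration:

   /* Entries are bucketed by dst.reg only; src.reg plays no part in
    * bucket selection. */
   static void
   acp_add(exec_list acp[ACP_HASH_SIZE], acp_entry *entry)
   {
      acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
   }

Killing entries whose destination was overwritten therefore probes a single bucket, but an overwritten register may appear as the source of an entry in any bucket, hence the full-table walk.
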
- foreach_list_safe(entry_node, aeb) {
- aeb_entry *entry = (aeb_entry *)entry_node;
-
+ foreach_in_list_safe(aeb_entry, entry, aeb) {
/* Kill all AEB entries that write a different value to or read from
* the flag register if we just wrote it.
*/
ralloc_free(live);
if (progress) {
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->opcode == BRW_OPCODE_NOP) {
inst->remove();
}
}
if (progress) {
- foreach_list_safe(node, &this->instructions) {
- fs_inst *inst = (fs_inst *)node;
-
+ foreach_in_list_safe(fs_inst, inst, &instructions) {
if (inst->opcode == BRW_OPCODE_NOP) {
inst->remove();
}
visit_list_elements(&refs, instructions);
/* Trim out variables we can't split. */
- foreach_list_safe(node, &refs.variable_list) {
- variable_entry *entry = (variable_entry *)node;
-
+ foreach_in_list_safe(variable_entry, entry, &refs.variable_list) {
if (debug) {
fprintf(stderr, "vector %s@%p: whole_access %d\n",
entry->var->name, (void *) entry->var,
void
fs_visitor::emit(exec_list list)
{
- foreach_list_safe(node, &list) {
- fs_inst *inst = (fs_inst *)node;
+ foreach_in_list_safe(fs_inst, inst, &list) {
inst->remove();
emit(inst);
}
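
One detail worth noting in the hunk above: inst->remove() runs before emit(inst). exec_node is an intrusive node that carries its own links, so an instruction can belong to only one exec_list at a time and must be unlinked from the temporary list before emit() appends it to the visitor's instruction stream. A sketch of the node layout, with field names from memory of Mesa's list header:

   struct exec_node {
      struct exec_node *next;   /* a single pair of links per node,  */
      struct exec_node *prev;   /* hence a single list membership    */
   };

The safe iterator is what makes the mid-walk remove() legal.
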
time = 0;
/* Remove non-DAG heads from the list. */
- foreach_list_safe(node, &instructions) {
- schedule_node *n = (schedule_node *)node;
+ foreach_in_list_safe(schedule_node, n, &instructions) {
if (n->parent_count != 0)
n->remove();
}
calculate_live_intervals();
- foreach_list_safe(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)node;
-
+ foreach_in_list_safe(vec4_instruction, inst, &instructions) {
pc++;
bool inst_writes_flag = false;
/* Now actually rewrite usage of the things we've moved to pull
* constants.
*/
- foreach_list_safe(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)node;
-
+ foreach_in_list_safe(vec4_instruction, inst, &instructions) {
for (int i = 0 ; i < 3; i++) {
if (inst->src[i].file != UNIFORM ||
pull_constant_loc[inst->src[i].reg] == -1)
calculate_live_intervals();
- foreach_list_safe(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)node;
-
+ foreach_in_list_safe(vec4_instruction, inst, &instructions) {
int ip = next_ip;
next_ip++;
* we may generate a new scratch_write instruction after the one
* we're processing.
*/
- foreach_list_safe(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)node;
-
+ foreach_in_list_safe(vec4_instruction, inst, &instructions) {
/* Set up the annotation tracking for new generated instructions. */
base_ir = inst->ir;
current_annotation = inst->annotation;
* Note that we don't move constant-indexed accesses to arrays. No
* testing has been done of the performance impact of this choice.
*/
- foreach_list_safe(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)node;
-
+ foreach_in_list_safe(vec4_instruction, inst, &instructions) {
for (int i = 0 ; i < 3; i++) {
if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
continue;
void
intel_resolve_map_clear(struct exec_list *resolve_map)
{
- foreach_list_safe(node, resolve_map) {
+ foreach_in_list_safe(struct exec_node, node, resolve_map) {
free(node);
}
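
This last hunk is the clearest illustration of why the _safe variants exist at all: the body frees the current node, so the iterator must have latched node->next before the body ran; a plain traversal would read the next pointer out of freed memory. A self-contained sketch of the hazard, using a hypothetical minimal list rather than Mesa's exec_list:

   #include <stdlib.h>

   struct node { struct node *next; };

   /* Broken: n->next is read after free(n), a use-after-free. */
   static void clear_unsafe(struct node *head)
   {
      for (struct node *n = head; n != NULL; n = n->next)
         free(n);
   }

   /* Correct: cache the successor first, as the _safe macros do. */
   static void clear_safe(struct node *head)
   {
      for (struct node *n = head, *next; n != NULL; n = next) {
         next = n->next;
         free(n);
      }
   }

   int main(void)
   {
      struct node *a = malloc(sizeof *a);
      struct node *b = malloc(sizeof *b);
      a->next = b;
      b->next = NULL;
      clear_safe(a);   /* frees both nodes without touching freed memory */
      return 0;
   }
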