#define LINK_MASK (1 << (GPR_LINK))
/* First GPR. */
-#define MS1_INT_ARG_FIRST 1
+#define MT_INT_ARG_FIRST 1
/* Given a SIZE in bytes, advance to the next word. */
#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
/* Define the information needed to generate branch and scc insns.
This is stored from the compare operation. */
-struct rtx_def * ms1_compare_op0;
-struct rtx_def * ms1_compare_op1;
+struct rtx_def * mt_compare_op0;
+struct rtx_def * mt_compare_op1;
/* Current frame information calculated by compute_frame_size. */
-struct ms1_frame_info current_frame_info;
+struct mt_frame_info current_frame_info;
/* Zero structure to initialize current_frame_info. */
-struct ms1_frame_info zero_frame_info;
+struct mt_frame_info zero_frame_info;
-/* ms1 doesn't have unsigned compares need a library call for this. */
-struct rtx_def * ms1_ucmpsi3_libcall;
+/* mt doesn't have unsigned compares; a library call is needed for this. */
+struct rtx_def * mt_ucmpsi3_libcall;
-static int ms1_flag_delayed_branch;
+static int mt_flag_delayed_branch;
\f
static rtx
-ms1_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
+mt_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
int incoming ATTRIBUTE_UNUSED)
{
return gen_rtx_REG (Pmode, RETVAL_REGNUM);
/* Implement RETURN_ADDR_RTX. */
rtx
-ms1_return_addr_rtx (int count)
+mt_return_addr_rtx (int count)
{
if (count != 0)
return NULL_RTX;
/* The following variable value indicates the number of nops required
between the current instruction and the next instruction to avoid
any pipeline hazards. */
-static int ms1_nops_required = 0;
-static const char * ms1_nop_reasons = "";
+static int mt_nops_required = 0;
+static const char * mt_nop_reasons = "";
/* Implement ASM_OUTPUT_OPCODE. */
const char *
-ms1_asm_output_opcode (FILE *f ATTRIBUTE_UNUSED, const char *ptr)
+mt_asm_output_opcode (FILE *f ATTRIBUTE_UNUSED, const char *ptr)
{
- if (ms1_nops_required)
+ if (mt_nops_required)
fprintf (f, ";# need %d nops because of %s\n\t",
- ms1_nops_required, ms1_nop_reasons);
+ mt_nops_required, mt_nop_reasons);
- while (ms1_nops_required)
+ while (mt_nops_required)
{
fprintf (f, "or r0, r0, r0\n\t");
- -- ms1_nops_required;
+ -- mt_nops_required;
}
return ptr;
/* Given an insn, return whether it's a memory operation or a branch
operation, otherwise return TYPE_ARITH. */
static enum attr_type
-ms1_get_attr_type (rtx complete_insn)
+mt_get_attr_type (rtx complete_insn)
{
rtx insn = PATTERN (complete_insn);
/* The following determines the number of nops that need to be
inserted between the previous instructions and current instruction
- to avoid pipeline hazards on the ms1 processor. Remember that
+ to avoid pipeline hazards on the mt processor. Remember that
the function is not called for asm insns. */
void
-ms1_final_prescan_insn (rtx insn,
+mt_final_prescan_insn (rtx insn,
rtx * opvec ATTRIBUTE_UNUSED,
int noperands ATTRIBUTE_UNUSED)
{
rtx prev_i;
enum attr_type prev_attr;
- ms1_nops_required = 0;
- ms1_nop_reasons = "";
+ mt_nops_required = 0;
+ mt_nop_reasons = "";
/* ms2 constraints are dealt with in reorg. */
if (TARGET_MS2)
if (prev_i == NULL || ! INSN_P (prev_i))
return;
- prev_attr = ms1_get_attr_type (prev_i);
+ prev_attr = mt_get_attr_type (prev_i);
/* Delayed branch slots already taken care of by delay branch scheduling. */
if (prev_attr == TYPE_BRANCH)
return;
- switch (ms1_get_attr_type (insn))
+ switch (mt_get_attr_type (insn))
{
case TYPE_LOAD:
case TYPE_STORE:
if ((prev_attr == TYPE_LOAD || prev_attr == TYPE_STORE)
&& TARGET_MS1_64_001)
{
- ms1_nops_required = 1;
- ms1_nop_reasons = "consecutive mem ops";
+ mt_nops_required = 1;
+ mt_nop_reasons = "consecutive mem ops";
}
/* Drop through. */
if (prev_attr == TYPE_LOAD
&& insn_true_dependent_p (prev_i, insn))
{
- ms1_nops_required = 1;
- ms1_nop_reasons = "load->arith dependency delay";
+ mt_nops_required = 1;
+ mt_nop_reasons = "load->arith dependency delay";
}
break;
{
/* One cycle of delay between arith
instructions and branch dependent on arith. */
- ms1_nops_required = 1;
- ms1_nop_reasons = "arith->branch dependency delay";
+ mt_nops_required = 1;
+ mt_nop_reasons = "arith->branch dependency delay";
}
else if (prev_attr == TYPE_LOAD)
{
/* Two cycles of delay are required
between load and dependent branch. */
if (TARGET_MS1_64_001)
- ms1_nops_required = 2;
+ mt_nops_required = 2;
else
- ms1_nops_required = 1;
- ms1_nop_reasons = "load->branch dependency delay";
+ mt_nops_required = 1;
+ mt_nop_reasons = "load->branch dependency delay";
}
}
break;
default:
- fatal_insn ("ms1_final_prescan_insn, invalid insn #1", insn);
+ fatal_insn ("mt_final_prescan_insn, invalid insn #1", insn);
break;
}
}
/* Print debugging information for a frame. */
static void
-ms1_debug_stack (struct ms1_frame_info * info)
+mt_debug_stack (struct mt_frame_info * info)
{
int regno;
/* Print a memory address as an operand to reference that memory location. */
static void
-ms1_print_operand_simple_address (FILE * file, rtx addr)
+mt_print_operand_simple_address (FILE * file, rtx addr)
{
if (!addr)
error ("PRINT_OPERAND_ADDRESS, null pointer");
/* Implement PRINT_OPERAND_ADDRESS. */
void
-ms1_print_operand_address (FILE * file, rtx addr)
+mt_print_operand_address (FILE * file, rtx addr)
{
if (GET_CODE (addr) == AND
&& GET_CODE (XEXP (addr, 1)) == CONST_INT
&& INTVAL (XEXP (addr, 1)) == -3)
- ms1_print_operand_simple_address (file, XEXP (addr, 0));
+ mt_print_operand_simple_address (file, XEXP (addr, 0));
else
- ms1_print_operand_simple_address (file, addr);
+ mt_print_operand_simple_address (file, addr);
}
/* Implement PRINT_OPERAND. */
void
-ms1_print_operand (FILE * file, rtx x, int code)
+mt_print_operand (FILE * file, rtx x, int code)
{
switch (code)
{
break;
default:
- /* output_operand_lossage ("ms1_print_operand: unknown code"); */
+ /* output_operand_lossage ("mt_print_operand: unknown code"); */
fprintf (file, "unknown code");
return;
}
break;
case MEM:
- ms1_print_operand_address(file, XEXP (x,0));
+ mt_print_operand_address(file, XEXP (x,0));
break;
case LABEL_REF:
/* Implement INIT_CUMULATIVE_ARGS. */
void
-ms1_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype, rtx libname,
- tree fndecl ATTRIBUTE_UNUSED, int incoming)
+mt_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype, rtx libname,
+ tree fndecl ATTRIBUTE_UNUSED, int incoming)
{
*cum = 0;
if (TARGET_DEBUG_ARG)
{
- fprintf (stderr, "\nms1_init_cumulative_args:");
+ fprintf (stderr, "\nmt_init_cumulative_args:");
if (incoming)
fputs (" incoming", stderr);
*PREGNO records the register number to use if scalar type. */
static int
-ms1_function_arg_slotno (const CUMULATIVE_ARGS * cum,
- enum machine_mode mode,
- tree type,
- int named ATTRIBUTE_UNUSED,
- int incoming_p ATTRIBUTE_UNUSED,
- int * pregno)
-{
- int regbase = MS1_INT_ARG_FIRST;
+mt_function_arg_slotno (const CUMULATIVE_ARGS * cum,
+ enum machine_mode mode,
+ tree type,
+ int named ATTRIBUTE_UNUSED,
+ int incoming_p ATTRIBUTE_UNUSED,
+ int * pregno)
+{
+ int regbase = MT_INT_ARG_FIRST;
int slotno = * cum;
if (mode == VOIDmode || targetm.calls.must_pass_in_stack (mode, type))
return -1;
- if (slotno >= MS1_NUM_ARG_REGS)
+ if (slotno >= MT_NUM_ARG_REGS)
return -1;
* pregno = regbase + slotno;
/* Implement FUNCTION_ARG. */
rtx
-ms1_function_arg (const CUMULATIVE_ARGS * cum,
- enum machine_mode mode,
- tree type,
- int named,
- int incoming_p)
+mt_function_arg (const CUMULATIVE_ARGS * cum,
+ enum machine_mode mode,
+ tree type,
+ int named,
+ int incoming_p)
{
int slotno, regno;
rtx reg;
- slotno = ms1_function_arg_slotno (cum, mode, type, named, incoming_p,
- & regno);
+ slotno = mt_function_arg_slotno (cum, mode, type, named, incoming_p, &regno);
if (slotno == -1)
reg = NULL_RTX;
/* Implement FUNCTION_ARG_ADVANCE. */
void
-ms1_function_arg_advance (CUMULATIVE_ARGS * cum,
- enum machine_mode mode,
- tree type ATTRIBUTE_UNUSED,
- int named)
+mt_function_arg_advance (CUMULATIVE_ARGS * cum,
+ enum machine_mode mode,
+ tree type ATTRIBUTE_UNUSED,
+ int named)
{
int slotno, regno;
/* We pass 0 for incoming_p here, it doesn't matter. */
- slotno = ms1_function_arg_slotno (cum, mode, type, named, 0, &regno);
+ slotno = mt_function_arg_slotno (cum, mode, type, named, 0, &regno);
* cum += (mode != BLKmode
? ROUND_ADVANCE (GET_MODE_SIZE (mode))
if (TARGET_DEBUG_ARG)
fprintf (stderr,
- "ms1_function_arg_advance: words = %2d, mode = %4s, named = %d, size = %3d\n",
+ "mt_function_arg_advance: words = %2d, mode = %4s, named = %d, size = %3d\n",
*cum, GET_MODE_NAME (mode), named,
(*cum) * UNITS_PER_WORD);
}
that are passed entirely in registers or that are entirely pushed
on the stack. */
static int
-ms1_arg_partial_bytes (CUMULATIVE_ARGS * pcum,
+mt_arg_partial_bytes (CUMULATIVE_ARGS * pcum,
enum machine_mode mode,
tree type,
bool named ATTRIBUTE_UNUSED)
else
words = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
- if (! targetm.calls.pass_by_reference (& cum, mode, type, named)
- && cum < MS1_NUM_ARG_REGS
- && (cum + words) > MS1_NUM_ARG_REGS)
+ if (! targetm.calls.pass_by_reference (&cum, mode, type, named)
+ && cum < MT_NUM_ARG_REGS
+ && (cum + words) > MT_NUM_ARG_REGS)
{
- int bytes = (MS1_NUM_ARG_REGS - cum) * UNITS_PER_WORD;
+ int bytes = (MT_NUM_ARG_REGS - cum) * UNITS_PER_WORD;
if (TARGET_DEBUG)
fprintf (stderr, "function_arg_partial_nregs = %d\n", bytes);
/* Implement TARGET_PASS_BY_REFERENCE hook. */
static bool
-ms1_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
+mt_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED,
tree type,
bool named ATTRIBUTE_UNUSED)
/* Implement FUNCTION_ARG_BOUNDARY. */
int
-ms1_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
+mt_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
tree type ATTRIBUTE_UNUSED)
{
return BITS_PER_WORD;
/* Implement REG_OK_FOR_BASE_P. */
int
-ms1_reg_ok_for_base_p (rtx x, int strict)
+mt_reg_ok_for_base_p (rtx x, int strict)
{
if (strict)
return (((unsigned) REGNO (x)) < FIRST_PSEUDO_REGISTER);
return 1;
}
-/* Helper function of ms1_legitimate_address_p. Return true if XINSN
+/* Helper function of mt_legitimate_address_p. Return true if XINSN
is a simple address, otherwise false. */
static bool
-ms1_legitimate_simple_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
- rtx xinsn,
- int strict)
+mt_legitimate_simple_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx xinsn, int strict)
{
if (TARGET_DEBUG)
{
debug_rtx (xinsn);
}
- if (GET_CODE (xinsn) == REG && ms1_reg_ok_for_base_p (xinsn, strict))
+ if (GET_CODE (xinsn) == REG && mt_reg_ok_for_base_p (xinsn, strict))
return true;
if (GET_CODE (xinsn) == PLUS
&& GET_CODE (XEXP (xinsn, 0)) == REG
- && ms1_reg_ok_for_base_p (XEXP (xinsn, 0), strict)
+ && mt_reg_ok_for_base_p (XEXP (xinsn, 0), strict)
&& GET_CODE (XEXP (xinsn, 1)) == CONST_INT
&& SMALL_INT (XEXP (xinsn, 1)))
return true;
/* Helper function of GO_IF_LEGITIMATE_ADDRESS. Return non-zero if
- XINSN is a legitimate address on MS1. */
+ XINSN is a legitimate address on MT. */
int
-ms1_legitimate_address_p (enum machine_mode mode,
- rtx xinsn,
- int strict)
+mt_legitimate_address_p (enum machine_mode mode, rtx xinsn, int strict)
{
- if (ms1_legitimate_simple_address_p (mode, xinsn, strict))
+ if (mt_legitimate_simple_address_p (mode, xinsn, strict))
return 1;
if ((mode) == SImode
&& GET_CODE (xinsn) == AND
&& GET_CODE (XEXP (xinsn, 1)) == CONST_INT
&& INTVAL (XEXP (xinsn, 1)) == -3)
- return ms1_legitimate_simple_address_p (mode, XEXP (xinsn, 0), strict);
+ return mt_legitimate_simple_address_p (mode, XEXP (xinsn, 0), strict);
else
return 0;
}
/* True if the current function is an interrupt handler
(either via #pragma or an attribute specification). */
int interrupt_handler;
-enum processor_type ms1_cpu;
+enum processor_type mt_cpu;
static struct machine_function *
-ms1_init_machine_status (void)
+mt_init_machine_status (void)
{
struct machine_function *f;
/* Implement OVERRIDE_OPTIONS. */
void
-ms1_override_options (void)
+mt_override_options (void)
{
- if (ms1_cpu_string != NULL)
+ if (mt_cpu_string != NULL)
{
- if (!strcasecmp (ms1_cpu_string, "MS1-64-001"))
- ms1_cpu = PROCESSOR_MS1_64_001;
- else if (!strcasecmp (ms1_cpu_string, "MS1-16-002"))
- ms1_cpu = PROCESSOR_MS1_16_002;
- else if (!strcasecmp (ms1_cpu_string, "MS1-16-003"))
- ms1_cpu = PROCESSOR_MS1_16_003;
- else if (!strcasecmp (ms1_cpu_string, "MS2"))
- ms1_cpu = PROCESSOR_MS2;
+ if (!strcasecmp (mt_cpu_string, "MS1-64-001"))
+ mt_cpu = PROCESSOR_MS1_64_001;
+ else if (!strcasecmp (mt_cpu_string, "MS1-16-002"))
+ mt_cpu = PROCESSOR_MS1_16_002;
+ else if (!strcasecmp (mt_cpu_string, "MS1-16-003"))
+ mt_cpu = PROCESSOR_MS1_16_003;
+ else if (!strcasecmp (mt_cpu_string, "MS2"))
+ mt_cpu = PROCESSOR_MS2;
else
- error ("bad value (%s) for -march= switch", ms1_cpu_string);
+ error ("bad value (%s) for -march= switch", mt_cpu_string);
}
else
- ms1_cpu = PROCESSOR_MS2;
+ mt_cpu = PROCESSOR_MS2;
if (flag_exceptions)
{
}
/* We do delayed branch filling in machine dependent reorg */
- ms1_flag_delayed_branch = flag_delayed_branch;
+ mt_flag_delayed_branch = flag_delayed_branch;
flag_delayed_branch = 0;
- init_machine_status = ms1_init_machine_status;
+ init_machine_status = mt_init_machine_status;
}
/* Do what is necessary for `va_start'. We look at the current function
first unnamed parameter. */
static rtx
-ms1_builtin_saveregs (void)
+mt_builtin_saveregs (void)
{
int first_reg = 0;
rtx address;
int regno;
- for (regno = first_reg; regno < MS1_NUM_ARG_REGS; regno ++)
- emit_move_insn (gen_rtx_MEM (word_mode,
- gen_rtx_PLUS (Pmode,
- gen_rtx_REG (SImode, ARG_POINTER_REGNUM),
- GEN_INT (UNITS_PER_WORD * regno))),
- gen_rtx_REG (word_mode,
- MS1_INT_ARG_FIRST + regno));
+ for (regno = first_reg; regno < MT_NUM_ARG_REGS; regno ++)
+ emit_move_insn
+ (gen_rtx_MEM (word_mode,
+ gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (SImode, ARG_POINTER_REGNUM),
+ GEN_INT (UNITS_PER_WORD * regno))),
+ gen_rtx_REG (word_mode,
+ MT_INT_ARG_FIRST + regno));
address = gen_rtx_PLUS (Pmode,
gen_rtx_REG (SImode, ARG_POINTER_REGNUM),
/* Implement `va_start'. */
void
-ms1_va_start (tree valist, rtx nextarg)
+mt_va_start (tree valist, rtx nextarg)
{
- ms1_builtin_saveregs ();
+ mt_builtin_saveregs ();
std_expand_builtin_va_start (valist, nextarg);
}
needed for local variables. */
unsigned int
-ms1_compute_frame_size (int size)
+mt_compute_frame_size (int size)
{
int regno;
unsigned int total_size;
STACK_OFFSET is the offset from the SP where the save will happen.
This function sets the REG_FRAME_RELATED_EXPR note accordingly. */
static void
-ms1_emit_save_restore (enum save_direction direction,
- rtx reg,
- rtx mem,
- int stack_offset)
+mt_emit_save_restore (enum save_direction direction,
+ rtx reg, rtx mem, int stack_offset)
{
if (direction == FROM_PROCESSOR_TO_MEM)
{
insn = emit_move_insn (mem, reg);
RTX_FRAME_RELATED_P (insn) = 1;
REG_NOTES (insn)
- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode,
- gen_rtx_MEM
- (SImode,
- gen_rtx_PLUS (SImode,
- stack_pointer_rtx,
- GEN_INT (stack_offset))),
- reg),
- REG_NOTES (insn));
+ = gen_rtx_EXPR_LIST
+ (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (SImode,
+ gen_rtx_PLUS (SImode,
+ stack_pointer_rtx,
+ GEN_INT (stack_offset))),
+ reg),
+ REG_NOTES (insn));
}
else
emit_move_insn (reg, mem);
frame pointer in epilogue. */
static void
-ms1_emit_save_fp (enum save_direction direction,
- struct ms1_frame_info info)
+mt_emit_save_fp (enum save_direction direction,
+ struct mt_frame_info info)
{
rtx base_reg;
int reg_mask = info.reg_mask & ~(FP_MASK | LINK_MASK);
{
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
- ms1_emit_save_restore (direction, gen_rtx_REG (SImode, GPR_FP),
- gen_rtx_MEM (SImode,
- gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
- stack_offset);
+ mt_emit_save_restore
+ (direction, gen_rtx_REG (SImode, GPR_FP),
+ gen_rtx_MEM (SImode,
+ gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
+ stack_offset);
}
}
in epilogue. */
static void
-ms1_emit_save_regs (enum save_direction direction,
- struct ms1_frame_info info)
+mt_emit_save_regs (enum save_direction direction,
+ struct mt_frame_info info)
{
rtx base_reg;
int regno;
if (info.save_fp)
{
/* This just records the space for it, the actual move generated in
- ms1_emit_save_fp (). */
+ mt_emit_save_fp (). */
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
}
{
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
- ms1_emit_save_restore (direction, gen_rtx_REG (SImode, GPR_LINK),
- gen_rtx_MEM (SImode,
- gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
- stack_offset);
+ mt_emit_save_restore
+ (direction, gen_rtx_REG (SImode, GPR_LINK),
+ gen_rtx_MEM (SImode,
+ gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
+ stack_offset);
}
/* Save any needed call-saved regs. */
{
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
- ms1_emit_save_restore (direction, gen_rtx_REG (SImode, regno),
- gen_rtx_MEM (SImode,
- gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
- stack_offset);
+ mt_emit_save_restore
+ (direction, gen_rtx_REG (SImode, regno),
+ gen_rtx_MEM (SImode,
+ gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
+ stack_offset);
}
}
}
/* Return true if FUNC is a function with the 'interrupt' attribute. */
static bool
-ms1_interrupt_function_p (tree func)
+mt_interrupt_function_p (tree func)
{
tree a;
/* Generate prologue code. */
void
-ms1_expand_prologue (void)
+mt_expand_prologue (void)
{
rtx size_rtx, insn;
unsigned int frame_size;
- if (ms1_interrupt_function_p (current_function_decl))
+ if (mt_interrupt_function_p (current_function_decl))
{
interrupt_handler = 1;
if (cfun->machine)
cfun->machine->interrupt_handler = 1;
}
- ms1_compute_frame_size (get_frame_size ());
+ mt_compute_frame_size (get_frame_size ());
if (TARGET_DEBUG_STACK)
- ms1_debug_stack (&current_frame_info);
+ mt_debug_stack (&current_frame_info);
/* Compute size of stack adjustment. */
frame_size = current_frame_info.total_size;
REG_NOTES (insn));
}
- /* Set R9 to point to old sp if required for access to register save area. */
+ /* Set R9 to point to old sp if required for access to register save
+ area. */
if ( current_frame_info.reg_size != 0
&& !CONST_OK_FOR_LETTER_P (frame_size, 'O'))
emit_insn (gen_addsi3 (size_rtx, size_rtx, stack_pointer_rtx));
/* Save the frame pointer. */
- ms1_emit_save_fp (FROM_PROCESSOR_TO_MEM, current_frame_info);
+ mt_emit_save_fp (FROM_PROCESSOR_TO_MEM, current_frame_info);
/* Now put the frame pointer into the frame pointer register. */
if (frame_pointer_needed)
}
/* Save the registers. */
- ms1_emit_save_regs (FROM_PROCESSOR_TO_MEM, current_frame_info);
+ mt_emit_save_regs (FROM_PROCESSOR_TO_MEM, current_frame_info);
/* If we are profiling, make sure no instructions are scheduled before
the call to mcount. */
/* Implement EPILOGUE_USES. */
int
-ms1_epilogue_uses (int regno)
+mt_epilogue_uses (int regno)
{
if (cfun->machine && cfun->machine->interrupt_handler && reload_completed)
return 1;
function epilogue, or EH_EPILOGUE when generating an EH
epilogue. */
void
-ms1_expand_epilogue (enum epilogue_type eh_mode)
+mt_expand_epilogue (enum epilogue_type eh_mode)
{
rtx size_rtx, insn;
unsigned frame_size;
- ms1_compute_frame_size (get_frame_size ());
+ mt_compute_frame_size (get_frame_size ());
if (TARGET_DEBUG_STACK)
- ms1_debug_stack (& current_frame_info);
+ mt_debug_stack (& current_frame_info);
/* Compute size of stack adjustment. */
frame_size = current_frame_info.total_size;
insn = emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
/* Restore the registers. */
- ms1_emit_save_fp (FROM_MEM_TO_PROCESSOR, current_frame_info);
- ms1_emit_save_regs (FROM_MEM_TO_PROCESSOR, current_frame_info);
+ mt_emit_save_fp (FROM_MEM_TO_PROCESSOR, current_frame_info);
+ mt_emit_save_regs (FROM_MEM_TO_PROCESSOR, current_frame_info);
/* Make stack adjustment and use scratch register if constant too
large to fit as immediate. */
/* Generate code for the "eh_return" pattern. */
void
-ms1_expand_eh_return (rtx * operands)
+mt_expand_eh_return (rtx * operands)
{
if (GET_CODE (operands[0]) != REG
|| REGNO (operands[0]) != EH_RETURN_STACKADJ_REGNO)
/* Generate code for the "eh_epilogue" pattern. */
void
-ms1_emit_eh_epilogue (rtx * operands ATTRIBUTE_UNUSED)
+mt_emit_eh_epilogue (rtx * operands ATTRIBUTE_UNUSED)
{
cfun->machine->eh_stack_adjust = EH_RETURN_STACKADJ_RTX; /* operands[0]; */
- ms1_expand_epilogue (EH_EPILOGUE);
+ mt_expand_epilogue (EH_EPILOGUE);
}
/* Handle an "interrupt" attribute. */
static tree
-ms1_handle_interrupt_attribute (tree * node,
+mt_handle_interrupt_attribute (tree * node,
tree name,
tree args ATTRIBUTE_UNUSED,
int flags ATTRIBUTE_UNUSED,
}
/* Table of machine attributes. */
-const struct attribute_spec ms1_attribute_table[] =
+const struct attribute_spec mt_attribute_table[] =
{
/* name, min, max, decl?, type?, func?, handler */
- { "interrupt", 0, 0, false, false, false, ms1_handle_interrupt_attribute },
+ { "interrupt", 0, 0, false, false, false, mt_handle_interrupt_attribute },
{ NULL, 0, 0, false, false, false, NULL }
};
/* Implement INITIAL_ELIMINATION_OFFSET. */
int
-ms1_initial_elimination_offset (int from, int to)
+mt_initial_elimination_offset (int from, int to)
{
- ms1_compute_frame_size (get_frame_size ());
+ mt_compute_frame_size (get_frame_size ());
if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
return 0;
represents the result of the compare. */
static rtx
-ms1_generate_compare (enum rtx_code code, rtx op0, rtx op1)
+mt_generate_compare (enum rtx_code code, rtx op0, rtx op1)
{
rtx scratch0, scratch1, const_scratch;
/* Need to adjust ranges for faking unsigned compares. */
scratch0 = gen_reg_rtx (SImode);
scratch1 = gen_reg_rtx (SImode);
- const_scratch = force_reg (SImode, GEN_INT(MS1_MIN_INT));
+ const_scratch = force_reg (SImode, GEN_INT(MT_MIN_INT));
emit_insn (gen_addsi3 (scratch0, const_scratch, op0));
emit_insn (gen_addsi3 (scratch1, const_scratch, op1));
break;
/* Emit a branch of kind CODE to location LOC. */
void
-ms1_emit_cbranch (enum rtx_code code, rtx loc, rtx op0, rtx op1)
+mt_emit_cbranch (enum rtx_code code, rtx loc, rtx op0, rtx op1)
{
rtx condition_rtx, loc_ref;
if (! reg_or_0_operand (op1, SImode))
op1 = copy_to_mode_reg (SImode, op1);
- condition_rtx = ms1_generate_compare (code, op0, op1);
+ condition_rtx = mt_generate_compare (code, op0, op1);
loc_ref = gen_rtx_LABEL_REF (VOIDmode, loc);
emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
found in part of X. */
static void
-ms1_set_memflags_1 (rtx x, int in_struct_p, int volatile_p)
+mt_set_memflags_1 (rtx x, int in_struct_p, int volatile_p)
{
int i;
case SEQUENCE:
case PARALLEL:
for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
- ms1_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p);
+ mt_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p);
break;
case INSN:
- ms1_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p);
+ mt_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p);
break;
case SET:
- ms1_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p);
- ms1_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p);
+ mt_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p);
+ mt_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p);
break;
case MEM:
If REF is not a MEM, don't do anything. */
void
-ms1_set_memflags (rtx ref)
+mt_set_memflags (rtx ref)
{
rtx insn;
int in_struct_p, volatile_p;
in_struct_p = MEM_IN_STRUCT_P (ref);
volatile_p = MEM_VOLATILE_P (ref);
- /* This is only called from ms1.md, after having had something
+ /* This is only called from mt.md, after having had something
generated from one of the insn patterns. So if everything is
zero, the pattern is already up-to-date. */
if (! in_struct_p && ! volatile_p)
return;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- ms1_set_memflags_1 (insn, in_struct_p, volatile_p);
+ mt_set_memflags_1 (insn, in_struct_p, volatile_p);
}
/* Implement SECONDARY_RELOAD_CLASS. */
enum reg_class
-ms1_secondary_reload_class (enum reg_class class ATTRIBUTE_UNUSED,
+mt_secondary_reload_class (enum reg_class class ATTRIBUTE_UNUSED,
enum machine_mode mode,
rtx x)
{
/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE
macros. */
rtx
-ms1_function_value (tree valtype, enum machine_mode mode, tree func_decl ATTRIBUTE_UNUSED)
+mt_function_value (tree valtype, enum machine_mode mode, tree func_decl ATTRIBUTE_UNUSED)
{
if ((mode) == DImode || (mode) == DFmode)
return gen_rtx_MEM (mode, gen_rtx_REG (mode, RETURN_VALUE_REGNUM));
and OPERANDS[5]. */
void
-ms1_split_words (enum machine_mode nmode,
+mt_split_words (enum machine_mode nmode,
enum machine_mode omode,
rtx *operands)
{
/* Implement TARGET_MUST_PASS_IN_STACK hook. */
static bool
-ms1_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
+mt_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
{
return (((type) != 0
&& (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
/* Increment the counter for the number of loop instructions in the
current function. */
-void ms1_add_loop (void)
+void mt_add_loop (void)
{
cfun->machine->has_loops++;
}
is bad. */
static bool
-ms1_loop_nesting (loop_info loop)
+mt_loop_nesting (loop_info loop)
{
loop_info inner;
unsigned ix;
for (ix = 0; VEC_iterate (loop_info, loop->loops, ix++, inner);)
{
if (!inner->depth)
- ms1_loop_nesting (inner);
+ mt_loop_nesting (inner);
if (inner->depth < 0)
{
/* Determine the length of block BB. */
static int
-ms1_block_length (basic_block bb)
+mt_block_length (basic_block bb)
{
int length = 0;
rtx insn;
insn if it matches DBNZ. */
static bool
-ms1_scan_loop (loop_info loop, rtx reg, rtx dbnz)
+mt_scan_loop (loop_info loop, rtx reg, rtx dbnz)
{
unsigned ix;
loop_info inner;
}
}
for (ix = 0; VEC_iterate (loop_info, loop->loops, ix, inner); ix++)
- if (ms1_scan_loop (inner, reg, NULL_RTX))
+ if (mt_scan_loop (inner, reg, NULL_RTX))
return true;
return false;
subroutine calls in the loop. */
static void
-ms1_reorg_loops (FILE *dump_file)
+mt_reorg_loops (FILE *dump_file)
{
basic_block bb;
loop_info loops = NULL;
loop->predecessor = NULL;
loop->dbnz = tail;
loop->depth = 0;
- loop->length = ms1_block_length (bb);
+ loop->length = mt_block_length (bb);
loop->blocks = VEC_alloc (basic_block, heap, 20);
VEC_quick_push (basic_block, loop->blocks, bb);
loop->loops = NULL;
/* We've not seen this block before. Add it to the loop's
list and then add each successor to the work list. */
bb->aux = loop;
- loop->length += ms1_block_length (bb);
+ loop->length += mt_block_length (bb);
VEC_safe_push (basic_block, heap, loop->blocks, bb);
FOR_EACH_EDGE (e, ei, bb->succs)
{
rtx iter_reg, insn, init_insn;
rtx init_val, loop_end, loop_init, end_label, head_label;
- if (!ms1_loop_nesting (loop))
+ if (!mt_loop_nesting (loop))
{
if (dump_file)
fprintf (dump_file, ";; loop %d is bad\n", loop->loop_no);
}
/* Scan all the blocks to make sure they don't use iter_reg. */
- if (ms1_scan_loop (loop, iter_reg, loop->dbnz))
+ if (mt_scan_loop (loop, iter_reg, loop->dbnz))
{
if (dump_file)
fprintf (dump_file, ";; loop %d uses iterator\n",
} label_info;
/* Chain of labels found in current function, used during reorg. */
-static label_info *ms1_labels;
+static label_info *mt_labels;
/* If *X is a label, add INSN to the list of branches for that
label. */
static int
-ms1_add_branches (rtx *x, void *insn)
+mt_add_branches (rtx *x, void *insn)
{
if (GET_CODE (*x) == LABEL_REF)
{
rtx label = XEXP (*x, 0);
label_info *info;
- for (info = ms1_labels; info; info = info->next)
+ for (info = mt_labels; info; info = info->next)
if (info->label == label)
break;
if (!info)
{
info = xmalloc (sizeof (*info));
- info->next = ms1_labels;
- ms1_labels = info;
+ info->next = mt_labels;
+ mt_labels = info;
info->label = label;
info->branches = NULL;
In that case, the caller must insert nops at the branch target. */
static rtx
-ms1_check_delay_slot (rtx branch, rtx insn)
+mt_check_delay_slot (rtx branch, rtx insn)
{
rtx slot;
rtx tmp;
delay slot. */
static void
-ms1_reorg_hazard (void)
+mt_reorg_hazard (void)
{
rtx insn, next;
jmp = NEXT_INSN (jmp))
continue;
- for_each_rtx (&PATTERN (jmp), ms1_add_branches, insn);
+ for_each_rtx (&PATTERN (jmp), mt_add_branches, insn);
}
/* Now scan for dependencies. */
label_info *label;
branch_info *branch;
- for (label = ms1_labels;
+ for (label = mt_labels;
label;
label = label->next)
if (label->label == prev)
branch;
branch = branch->next)
{
- tmp = ms1_check_delay_slot (branch->insn, jmp);
+ tmp = mt_check_delay_slot (branch->insn, jmp);
if (tmp == branch->insn)
{
if (GET_CODE (PATTERN (prev)) == SEQUENCE)
{
/* Look at the delay slot. */
- tmp = ms1_check_delay_slot (prev, jmp);
+ tmp = mt_check_delay_slot (prev, jmp);
if (tmp == prev)
nops = count;
break;
}
/* Free the data structures. */
- while (ms1_labels)
+ while (mt_labels)
{
- label_info *label = ms1_labels;
+ label_info *label = mt_labels;
branch_info *branch, *next;
- ms1_labels = label->next;
+ mt_labels = label->next;
for (branch = label->branches; branch; branch = next)
{
next = branch->next;
scheduling hazards. */
static void
-ms1_machine_reorg (void)
+mt_machine_reorg (void)
{
if (cfun->machine->has_loops && TARGET_MS2)
- ms1_reorg_loops (dump_file);
+ mt_reorg_loops (dump_file);
- if (ms1_flag_delayed_branch)
+ if (mt_flag_delayed_branch)
dbr_schedule (get_insns (), dump_file);
if (TARGET_MS2)
- ms1_reorg_hazard ();
+ mt_reorg_hazard ();
}
\f
/* Initialize the GCC target structure. */
-const struct attribute_spec ms1_attribute_table[];
+const struct attribute_spec mt_attribute_table[];
#undef TARGET_ATTRIBUTE_TABLE
-#define TARGET_ATTRIBUTE_TABLE ms1_attribute_table
+#define TARGET_ATTRIBUTE_TABLE mt_attribute_table
#undef TARGET_STRUCT_VALUE_RTX
-#define TARGET_STRUCT_VALUE_RTX ms1_struct_value_rtx
+#define TARGET_STRUCT_VALUE_RTX mt_struct_value_rtx
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_PASS_BY_REFERENCE
-#define TARGET_PASS_BY_REFERENCE ms1_pass_by_reference
+#define TARGET_PASS_BY_REFERENCE mt_pass_by_reference
#undef TARGET_MUST_PASS_IN_STACK
-#define TARGET_MUST_PASS_IN_STACK ms1_pass_in_stack
+#define TARGET_MUST_PASS_IN_STACK mt_pass_in_stack
#undef TARGET_ARG_PARTIAL_BYTES
-#define TARGET_ARG_PARTIAL_BYTES ms1_arg_partial_bytes
+#define TARGET_ARG_PARTIAL_BYTES mt_arg_partial_bytes
#undef TARGET_MACHINE_DEPENDENT_REORG
-#define TARGET_MACHINE_DEPENDENT_REORG ms1_machine_reorg
+#define TARGET_MACHINE_DEPENDENT_REORG mt_machine_reorg
struct gcc_target targetm = TARGET_INITIALIZER;
\f
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
-extern struct rtx_def * ms1_ucmpsi3_libcall;
+extern struct rtx_def * mt_ucmpsi3_libcall;
enum processor_type
{
NORMAL_EPILOGUE
};
-extern enum processor_type ms1_cpu;
+extern enum processor_type mt_cpu;
\f
/* A C string constant that tells the GCC driver program options to pass to
#define TARGET_CPU_CPP_BUILTINS() \
do \
{ \
- builtin_define_std ("ms1"); \
- builtin_assert ("machine=ms1"); \
+ builtin_define_std ("mt"); \
+ builtin_assert ("machine=mt"); \
} \
while (0)
-#define TARGET_MS1_64_001 (ms1_cpu == PROCESSOR_MS1_64_001)
-#define TARGET_MS1_16_002 (ms1_cpu == PROCESSOR_MS1_16_002)
-#define TARGET_MS1_16_003 (ms1_cpu == PROCESSOR_MS1_16_003)
-#define TARGET_MS2 (ms1_cpu == PROCESSOR_MS2)
+#define TARGET_MS1_64_001 (mt_cpu == PROCESSOR_MS1_64_001)
+#define TARGET_MS1_16_002 (mt_cpu == PROCESSOR_MS1_16_002)
+#define TARGET_MS1_16_003 (mt_cpu == PROCESSOR_MS1_16_003)
+#define TARGET_MS2 (mt_cpu == PROCESSOR_MS2)
-#define TARGET_VERSION fprintf (stderr, " (ms1)");
+#define TARGET_VERSION fprintf (stderr, " (mt)");
-#define OVERRIDE_OPTIONS ms1_override_options ()
+#define OVERRIDE_OPTIONS mt_override_options ()
#define CAN_DEBUG_WITHOUT_FP 1
#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
- ms1_secondary_reload_class((CLASS), (MODE), (X))
+ mt_secondary_reload_class((CLASS), (MODE), (X))
/* A C expression for the maximum number of consecutive registers of
class CLASS needed to hold a value of mode MODE. */
(`G', `H') that specify particular ranges of `const_double' values. */
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) 0
-/* Most negative value represent on ms1 */
-#define MS1_MIN_INT 0x80000000
+/* Most negative value representable on mt.  */
+#define MT_MIN_INT 0x80000000
\f
/* Basic Stack Layout. */
#define FIRST_PARM_OFFSET(FUNDECL) 0
#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
- ms1_return_addr_rtx (COUNT)
+ mt_return_addr_rtx (COUNT)
/* A C expression whose value is RTL representing the location of the incoming
return address at the beginning of any function, before the prologue. This
/* Structure to be filled in by compute_frame_size with register
save masks, and offsets for the current function. */
-struct ms1_frame_info
+struct mt_frame_info
{
unsigned int total_size; /* # Bytes that the entire frame takes up. */
unsigned int pretend_size; /* # Bytes we push and pretend caller did. */
int initialized; /* Nonzero if frame size already calculated. */
};
-extern struct ms1_frame_info current_frame_info;
+extern struct mt_frame_info current_frame_info;
/* If defined, this macro specifies a table of register pairs used to eliminate
unneeded registers that point into the stack frame. */
registers. This macro must be defined if `ELIMINABLE_REGS' is
defined. */
#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
- (OFFSET) = ms1_initial_elimination_offset (FROM, TO)
+ (OFFSET) = mt_initial_elimination_offset (FROM, TO)
/* If defined, the maximum amount of space required for outgoing
arguments will be computed and placed into the variable
#define OUTGOING_REG_PARM_STACK_SPACE
/* The number of register assigned to holding function arguments. */
-#define MS1_NUM_ARG_REGS 4
+#define MT_NUM_ARG_REGS 4
/* Define this if it is the responsibility of the caller to allocate
the area reserved for arguments passed in registers. */
-#define REG_PARM_STACK_SPACE(FNDECL) (MS1_NUM_ARG_REGS * UNITS_PER_WORD)
+#define REG_PARM_STACK_SPACE(FNDECL) (MT_NUM_ARG_REGS * UNITS_PER_WORD)
/* Define this macro if `REG_PARM_STACK_SPACE' is defined, but the stack
parameters don't skip the area specified by it. */
#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
- ms1_function_arg (& (CUM), (MODE), (TYPE), (NAMED), FALSE)
+ mt_function_arg (& (CUM), (MODE), (TYPE), (NAMED), FALSE)
#define CUMULATIVE_ARGS int
-#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
- ms1_init_cumulative_args (& (CUM), FNTYPE, LIBNAME, FNDECL, FALSE)
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ mt_init_cumulative_args (& (CUM), FNTYPE, LIBNAME, FNDECL, FALSE)
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
- ms1_function_arg_advance (&CUM, MODE, TYPE, NAMED)
+ mt_function_arg_advance (&CUM, MODE, TYPE, NAMED)
#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
- ms1_function_arg_boundary (MODE, TYPE)
+ mt_function_arg_boundary (MODE, TYPE)
#define FUNCTION_ARG_REGNO_P(REGNO) \
((REGNO) >= FIRST_ARG_REGNUM && ((REGNO) <= LAST_ARG_REGNUM))
#define RETURN_VALUE_REGNUM RETVAL_REGNUM
#define FUNCTION_VALUE(VALTYPE, FUNC) \
- ms1_function_value (VALTYPE, TYPE_MODE(VALTYPE), FUNC)
+ mt_function_value (VALTYPE, TYPE_MODE(VALTYPE), FUNC)
#define LIBCALL_VALUE(MODE) \
- ms1_function_value (NULL_TREE, MODE, NULL_TREE)
+ mt_function_value (NULL_TREE, MODE, NULL_TREE)
#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RETURN_VALUE_REGNUM)
adjust the stack pointer before a return from the function. */
#define EXIT_IGNORE_STACK 1
-#define EPILOGUE_USES(REGNO) ms1_epilogue_uses(REGNO)
+#define EPILOGUE_USES(REGNO) mt_epilogue_uses(REGNO)
/* Define this macro if the function epilogue contains delay slots to which
instructions from the rest of the function can be "moved". */
#define FUNCTION_PROFILER(FILE, LABELNO) gcc_unreachable ()
#define EXPAND_BUILTIN_VA_START(VALIST, NEXTARG) \
- ms1_va_start (VALIST, NEXTARG)
+ mt_va_start (VALIST, NEXTARG)
/* Trampolines are not implemented. */
#define TRAMPOLINE_SIZE 0
#ifdef REG_OK_STRICT
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ \
- if (ms1_legitimate_address_p (MODE, X, 1)) \
+ if (mt_legitimate_address_p (MODE, X, 1)) \
goto ADDR; \
}
#else
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ \
- if (ms1_legitimate_address_p (MODE, X, 0)) \
+ if (mt_legitimate_address_p (MODE, X, 0)) \
goto ADDR; \
}
#endif
#ifdef REG_OK_STRICT
-#define REG_OK_FOR_BASE_P(X) ms1_reg_ok_for_base_p (X, 1)
+#define REG_OK_FOR_BASE_P(X) mt_reg_ok_for_base_p (X, 1)
#else
-#define REG_OK_FOR_BASE_P(X) ms1_reg_ok_for_base_p (X, 0)
+#define REG_OK_FOR_BASE_P(X) mt_reg_ok_for_base_p (X, 0)
#endif
#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)
desire, and increment the variable PTR to point at the end of the opcode so
that it will not be output twice. */
#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
- (PTR) = ms1_asm_output_opcode (STREAM, PTR)
+ (PTR) = mt_asm_output_opcode (STREAM, PTR)
#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
- ms1_final_prescan_insn (INSN, OPVEC, NOPERANDS)
+ mt_final_prescan_insn (INSN, OPVEC, NOPERANDS)
-#define PRINT_OPERAND(STREAM, X, CODE) ms1_print_operand (STREAM, X, CODE)
+#define PRINT_OPERAND(STREAM, X, CODE) mt_print_operand (STREAM, X, CODE)
/* A C expression which evaluates to true if CODE is a valid punctuation
character for use in the `PRINT_OPERAND' macro. */
/* #: Print nop for delay slot. */
#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '#')
-#define PRINT_OPERAND_ADDRESS(STREAM, X) ms1_print_operand_address (STREAM, X)
+#define PRINT_OPERAND_ADDRESS(STREAM, X) mt_print_operand_address (STREAM, X)
/* If defined, C string expressions to be used for the `%R', `%L', `%U', and
`%I' options of `asm_fprintf' (see `final.c'). These are useful when a
stored from the compare operation. Note that we can't use "rtx" here
since it hasn't been defined! */
-extern struct rtx_def * ms1_compare_op0;
-extern struct rtx_def * ms1_compare_op1;
+extern struct rtx_def * mt_compare_op0;
+extern struct rtx_def * mt_compare_op1;
\f
;; Delay Slots
-;; The ms1 does not allow branches in the delay slot.
-;; The ms1 does not allow back to back memory or io instruction.
+;; The mt does not allow branches in the delay slot.
+;; The mt does not allow back-to-back memory or I/O instructions.
;; The compiler does not know what the type of instruction is at
;; the destination of the branch. Thus, only type that will be acceptable
;; (safe) is the arith type.
(const_int -1)))
(clobber (match_scratch:SI 5 ""))])]
"TARGET_MS1_16_003 || TARGET_MS2"
- {ms1_add_loop ();})
+ {mt_add_loop ();})
\f
;; Moves
start_sequence ();
emit_insn (gen_storeqi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
- ms1_set_memflags (operands[0]);
+ mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_loadqi (gen_lowpart (SImode, data), address, scratch1));
- ms1_set_memflags (operands[1]);
+ mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_movsi (gen_lowpart (SImode, data), address));
- ms1_set_memflags (operands[1]);
+ mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_storehi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
- ms1_set_memflags (operands[0]);
+ mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_loadhi (gen_lowpart (SImode, data), address,
scratch1));
- ms1_set_memflags (operands[1]);
+ mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_movsi (gen_lowpart (SImode, data), address));
- ms1_set_memflags (operands[1]);
+ mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
"{
/* figure out what precisely to put into operands 2, 3, 4, and 5 */
- ms1_split_words (SImode, DFmode, operands);
+ mt_split_words (SImode, DFmode, operands);
}"
)
start_sequence ();
emit_insn (gen_loadqi (gen_lowpart (SImode, data), address, scratch1));
- ms1_set_memflags (operands[1]);
+ mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_storeqi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
- ms1_set_memflags (operands[0]);
+ mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_loadhi (gen_lowpart (SImode, data), address,
scratch1));
- ms1_set_memflags (operands[1]);
+ mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
start_sequence ();
emit_insn (gen_storehi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
- ms1_set_memflags (operands[0]);
+ mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
""
"
{
- ms1_compare_op0 = operands[0];
- ms1_compare_op1 = operands[1];
+ mt_compare_op0 = operands[0];
+ mt_compare_op1 = operands[1];
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (EQ, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (EQ, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (NE, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (NE, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (GE, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (GE, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (GT, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (GT, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (LE, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (LE, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (LT, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (LT, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (GEU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (GEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (GTU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (GTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (LEU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (LEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (LTU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (LTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(use (match_operand 0 "" ""))]
""
"
-{ ms1_emit_cbranch (GEU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+{
+ mt_emit_cbranch (GEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
""
"
{
- ms1_emit_cbranch (GTU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+ mt_emit_cbranch (GTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
""
"
{
- ms1_emit_cbranch (LEU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+ mt_emit_cbranch (LEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
""
"
{
- ms1_emit_cbranch (LTU, operands[0],
- ms1_compare_op0, ms1_compare_op1);
+ mt_emit_cbranch (LTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
[(set_attr "length" "4")
(set_attr "type" "branch")])
-;; No unsigned operators on Morpho ms1. All the unsigned operations are
+;; No unsigned operators on Morpho mt. All the unsigned operations are
;; converted to the signed operations above.
\f
;; "seq", "sne", "slt", "sle", "sgt", "sge", "sltu", "sleu",
;; "sgtu", and "sgeu" don't exist as regular instruction on the
-;; ms1, so these are not defined
+;; mt, so these are not defined
;; Call and branch instructions
""
"
{
- ms1_expand_prologue ();
+ mt_expand_prologue ();
DONE;
}")
""
"
{
- ms1_expand_epilogue (NORMAL_EPILOGUE);
+ mt_expand_epilogue (NORMAL_EPILOGUE);
DONE;
}")
""
"
{
- ms1_expand_eh_return (operands);
+ mt_expand_eh_return (operands);
DONE;
}")
"#"
"reload_completed"
[(const_int 1)]
- "ms1_emit_eh_epilogue (operands); DONE;"
+ "mt_emit_eh_epilogue (operands); DONE;"
)
\f
;; No operation, needed in case the user uses -g but not -O.
""
"
{
- operands[2] = ms1_compare_op0;
- operands[3] = ms1_compare_op1;
+ operands[2] = mt_compare_op0;
+ operands[3] = mt_compare_op1;
}")
;; Templates to control handling of interrupts