#define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack
/* This table must be in sync with enum processor_type in i386.h. */
-const struct ptt processor_target_table[PROCESSOR_max] =
+const char *const processor_names[PROCESSOR_max] =
{
- /* The "0:0:8" label alignment specified for some processors generates
- secondary 8-byte alignment only for those label/jump/loop targets
- which have primary alignment. */
-
- {"generic", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"i386", "4", "4", NULL, "4" },
- {"i486", "16", "16", "0:0:8", "16"},
- {"pentium", "16:8:8", "16:8:8", "0:0:8", "16"},
- {"lakemont", "16:8:8", "16:8:8", "0:0:8", "16"},
- {"pentiumpro", "16", "16:11:8", "0:0:8", "16"},
- {"pentium4", NULL, NULL, NULL, NULL},
- {"nocona", NULL, NULL, NULL, NULL},
- {"core2", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"nehalem", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"sandybridge", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"haswell", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"bonnell", "16", "16:8:8", "0:0:8", "16"},
- {"silvermont", "16", "16:8:8", "0:0:8", "16"},
- {"goldmont", "16", "16:8:8", "0:0:8", "16"},
- {"goldmont-plus", "16", "16:8:8", "0:0:8", "16"},
- {"tremont", "16", "16:8:8", "0:0:8", "16"},
- {"knl", "16", "16:8:8", "0:0:8", "16"},
- {"knm", "16", "16:8:8", "0:0:8", "16"},
- {"skylake", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"skylake-avx512", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"cannonlake", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"icelake-client", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"icelake-server", "16:11:8", "16:11:8", "0:0:8", "16"},
- {"intel", "16", "16:8:8", "0:0:8", "16"},
- {"geode", NULL, NULL, NULL, NULL},
- {"k6", "32:8:8", "32:8:8", "0:0:8", "32"},
- {"athlon", "16:8:8", "16:8:8", "0:0:8", "16"},
- {"k8", "16:8:8", "16:8:8", "0:0:8", "16"},
- {"amdfam10", "32:25:8", "32:8:8", "0:0:8", "32"},
- {"bdver1", "16:11:8", "16:8:8", "0:0:8", "11"},
- {"bdver2", "16:11:8", "16:8:8", "0:0:8", "11"},
- {"bdver3", "16:11:8", "16:8:8", "0:0:8", "11"},
- {"bdver4", "16:11:8", "16:8:8", "0:0:8", "11"},
- {"btver1", "16:11:8", "16:8:8", "0:0:8", "11"},
- {"btver2", "16:11:8", "16:8:8", "0:0:8", "11"},
- {"znver1", "16", "16", "0:0:8", "16"}
+ "generic",
+ "i386",
+ "i486",
+ "pentium",
+ "lakemont",
+ "pentiumpro",
+ "pentium4",
+ "nocona",
+ "core2",
+ "nehalem",
+ "sandybridge",
+ "haswell",
+ "bonnell",
+ "silvermont",
+ "goldmont",
+ "goldmont-plus",
+ "tremont",
+ "knl",
+ "knm",
+ "skylake",
+ "skylake-avx512",
+ "cannonlake",
+ "icelake-client",
+ "icelake-server",
+ "intel",
+ "geode",
+ "k6",
+ "athlon",
+ "k8",
+ "amdfam10",
+ "bdver1",
+ "bdver2",
+ "bdver3",
+ "bdver4",
+ "btver1",
+ "btver2",
+ "znver1"
};
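/* A minimal sketch (not part of the patch), assuming only what the comment
   above the table states: processor_names is indexed directly by
   enum processor_type, so the array and the enum must be kept in sync.
   The helper name ix86_tune_name is hypothetical and exists purely to
   illustrate the intended lookup.  */
static const char *
ix86_tune_name (enum processor_type tune)
{
  gcc_assert (tune < PROCESSOR_max);
  return processor_names[tune];
}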
const pta processor_alias_table[] =
break;
case OPT_mtune_:
for (unsigned i = 0; i < PROCESSOR_max; i++)
- v.safe_push (processor_target_table[i].name);
+ v.safe_push (processor_names[i]);
break;
default:
break;
ix86_size_memset,
COSTS_N_BYTES (1), /* cond_taken_branch_cost. */
COSTS_N_BYTES (1), /* cond_not_taken_branch_cost. */
+ NULL, /* Loop alignment. */
+ NULL, /* Jump alignment. */
+ NULL, /* Label alignment. */
+ NULL, /* Func alignment. */
};
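/* A minimal sketch (not part of the patch) of how the four new cost-table
   fields are meant to be consumed.  Each string uses the same N:M:N2:M2
   syntax as the -falign-loops/-falign-jumps/-falign-labels/-falign-functions
   options: "16:11:8" requests 16-byte alignment subject to an 11-byte skip
   limit with a secondary 8-byte alignment, "0:0:8" applies a secondary
   8-byte alignment only to label/jump/loop targets that already have a
   primary alignment, and NULL leaves the compiler default untouched.
   The identifiers below (ix86_tune_cost, align_loop, str_align_loops, ...)
   are assumptions used for illustration only.  */
if (flag_align_loops && !str_align_loops)
  str_align_loops = ix86_tune_cost->align_loop;
if (flag_align_jumps && !str_align_jumps)
  str_align_jumps = ix86_tune_cost->align_jump;
if (flag_align_labels && !str_align_labels)
  str_align_labels = ix86_tune_cost->align_label;
if (flag_align_functions && !str_align_functions)
  str_align_functions = ix86_tune_cost->align_func;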
/* Processor costs (relative to an add) */
i386_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "4", /* Loop alignment. */
+ "4", /* Jump alignment. */
+ NULL, /* Label alignment. */
+ "4", /* Func alignment. */
};
static stringop_algs i486_memcpy[2] = {
i486_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16", /* Loop alignment. */
+ "16", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
static stringop_algs pentium_memcpy[2] = {
pentium_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:8:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
static const
pentium_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:8:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
pentiumpro_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16", /* Loop alignment. */
+ "16:11:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
static stringop_algs geode_memcpy[2] = {
geode_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ NULL, /* Loop alignment. */
+ NULL, /* Jump alignment. */
+ NULL, /* Label alignment. */
+ NULL, /* Func alignment. */
};
static stringop_algs k6_memcpy[2] = {
k6_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "32:8:8", /* Loop alignment. */
+ "32:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "32", /* Func alignment. */
};
/* For some reason, Athlon deals better with REP prefix (relative to loops)
athlon_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:8:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* K8 has optimized REP instruction for medium sized blocks, but for very
k8_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16:8:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
amdfam10_memset,
COSTS_N_INSNS (2), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "32:25:8", /* Loop alignment. */
+ "32:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "32", /* Func alignment. */
};
/* BDVER1 has optimized REP instruction for medium sized blocks, but for
bdver1_memset,
COSTS_N_INSNS (4), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "11", /* Func alignment. */
};
/* BDVER2 has optimized REP instruction for medium sized blocks, but for
bdver2_memset,
COSTS_N_INSNS (4), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "11", /* Func alignment. */
};
bdver3_memset,
COSTS_N_INSNS (4), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "11", /* Func alignment. */
};
/* BDVER4 has optimized REP instruction for medium sized blocks, but for
bdver4_memset,
COSTS_N_INSNS (4), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "11", /* Func alignment. */
};
znver1_memset,
COSTS_N_INSNS (4), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16", /* Loop alignment. */
+ "16", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* skylake_cost should produce code tuned for Skylake family of CPUs. */
skylake_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:11:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* BTVER1 has optimized REP instruction for medium sized blocks, but for
very small blocks it is better to use loop. For large blocks, libcall can
btver1_memset,
COSTS_N_INSNS (2), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "11", /* Func alignment. */
};
static stringop_algs btver2_memcpy[2] = {
btver2_memset,
COSTS_N_INSNS (2), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "11", /* Func alignment. */
};
static stringop_algs pentium4_memcpy[2] = {
pentium4_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ NULL, /* Loop alignment. */
+ NULL, /* Jump alignment. */
+ NULL, /* Label alignment. */
+ NULL, /* Func alignment. */
};
static stringop_algs nocona_memcpy[2] = {
nocona_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ NULL, /* Loop alignment. */
+ NULL, /* Jump alignment. */
+ NULL, /* Label alignment. */
+ NULL, /* Func alignment. */
};
static stringop_algs atom_memcpy[2] = {
atom_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
static stringop_algs slm_memcpy[2] = {
slm_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
static stringop_algs intel_memcpy[2] = {
intel_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16", /* Loop alignment. */
+ "16:8:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* Generic should produce code tuned for Core-i7 (and newer chips)
generic_memset,
COSTS_N_INSNS (4), /* cond_taken_branch_cost. */
COSTS_N_INSNS (2), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:11:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};
/* core_cost should produce code tuned for Core family of CPUs. */
core_memset,
COSTS_N_INSNS (3), /* cond_taken_branch_cost. */
COSTS_N_INSNS (1), /* cond_not_taken_branch_cost. */
+ "16:11:8", /* Loop alignment. */
+ "16:11:8", /* Jump alignment. */
+ "0:0:8", /* Label alignment. */
+ "16", /* Func alignment. */
};