const struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks. */
-#define m_386 (1<<PROCESSOR_I386)
-#define m_486 (1<<PROCESSOR_I486)
-#define m_PENT (1<<PROCESSOR_PENTIUM)
-#define m_LAKEMONT (1<<PROCESSOR_LAKEMONT)
-#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
-#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
-#define m_NOCONA (1<<PROCESSOR_NOCONA)
+#define m_386 (1U<<PROCESSOR_I386)
+#define m_486 (1U<<PROCESSOR_I486)
+#define m_PENT (1U<<PROCESSOR_PENTIUM)
+#define m_LAKEMONT (1U<<PROCESSOR_LAKEMONT)
+#define m_PPRO (1U<<PROCESSOR_PENTIUMPRO)
+#define m_PENT4 (1U<<PROCESSOR_PENTIUM4)
+#define m_NOCONA (1U<<PROCESSOR_NOCONA)
#define m_P4_NOCONA (m_PENT4 | m_NOCONA)
-#define m_CORE2 (1<<PROCESSOR_CORE2)
-#define m_NEHALEM (1<<PROCESSOR_NEHALEM)
-#define m_SANDYBRIDGE (1<<PROCESSOR_SANDYBRIDGE)
-#define m_HASWELL (1<<PROCESSOR_HASWELL)
+#define m_CORE2 (1U<<PROCESSOR_CORE2)
+#define m_NEHALEM (1U<<PROCESSOR_NEHALEM)
+#define m_SANDYBRIDGE (1U<<PROCESSOR_SANDYBRIDGE)
+#define m_HASWELL (1U<<PROCESSOR_HASWELL)
#define m_CORE_ALL (m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_HASWELL)
-#define m_BONNELL (1<<PROCESSOR_BONNELL)
-#define m_SILVERMONT (1<<PROCESSOR_SILVERMONT)
-#define m_KNL (1<<PROCESSOR_KNL)
-#define m_SKYLAKE_AVX512 (1<<PROCESSOR_SKYLAKE_AVX512)
-#define m_INTEL (1<<PROCESSOR_INTEL)
-
-#define m_GEODE (1<<PROCESSOR_GEODE)
-#define m_K6 (1<<PROCESSOR_K6)
+#define m_BONNELL (1U<<PROCESSOR_BONNELL)
+#define m_SILVERMONT (1U<<PROCESSOR_SILVERMONT)
+#define m_KNL (1U<<PROCESSOR_KNL)
+#define m_SKYLAKE_AVX512 (1U<<PROCESSOR_SKYLAKE_AVX512)
+#define m_INTEL (1U<<PROCESSOR_INTEL)
+
+#define m_GEODE (1U<<PROCESSOR_GEODE)
+#define m_K6 (1U<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
-#define m_K8 (1<<PROCESSOR_K8)
-#define m_ATHLON (1<<PROCESSOR_ATHLON)
+#define m_K8 (1U<<PROCESSOR_K8)
+#define m_ATHLON (1U<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
-#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
-#define m_BDVER1 (1<<PROCESSOR_BDVER1)
-#define m_BDVER2 (1<<PROCESSOR_BDVER2)
-#define m_BDVER3 (1<<PROCESSOR_BDVER3)
-#define m_BDVER4 (1<<PROCESSOR_BDVER4)
-#define m_ZNVER1 (1<<PROCESSOR_ZNVER1)
-#define m_BTVER1 (1<<PROCESSOR_BTVER1)
-#define m_BTVER2 (1<<PROCESSOR_BTVER2)
+#define m_AMDFAM10 (1U<<PROCESSOR_AMDFAM10)
+#define m_BDVER1 (1U<<PROCESSOR_BDVER1)
+#define m_BDVER2 (1U<<PROCESSOR_BDVER2)
+#define m_BDVER3 (1U<<PROCESSOR_BDVER3)
+#define m_BDVER4 (1U<<PROCESSOR_BDVER4)
+#define m_ZNVER1 (1U<<PROCESSOR_ZNVER1)
+#define m_BTVER1 (1U<<PROCESSOR_BTVER1)
+#define m_BTVER2 (1U<<PROCESSOR_BTVER2)
#define m_BDVER (m_BDVER1 | m_BDVER2 | m_BDVER3 | m_BDVER4)
#define m_BTVER (m_BTVER1 | m_BTVER2)
#define m_AMD_MULTIPLE (m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER \
| m_ZNVER1)
-#define m_GENERIC (1<<PROCESSOR_GENERIC)
+#define m_GENERIC (1U<<PROCESSOR_GENERIC)
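The switch to an unsigned literal matters once the PROCESSOR_* enumeration grows toward bit 31: with a plain int literal, `1 << 31` overflows a signed 32-bit int, which is undefined behavior in C, whereas `1U << 31` is well defined and simply sets the top bit of the mask. A minimal, self-contained sketch of the hazard (the enumerator below is hypothetical and chosen only to illustrate the boundary, it is not one of the PROCESSOR_* values above):

#include <stdio.h>

enum { PROCESSOR_HYPOTHETICAL = 31 };   /* hypothetical: any enumerator >= 31 */

int
main (void)
{
  /* Signed literal: 1 << 31 is undefined behavior when int is 32 bits,
     because the result 2147483648 is not representable in int.  */
  /* int bad = 1 << PROCESSOR_HYPOTHETICAL;   -- UB, do not do this.  */

  /* Unsigned literal: well defined, yields the top bit of a 32-bit mask,
     so masks such as ~0U ("all processors") and 0U ("none") stay valid
     however many enumerators are added.  */
  unsigned good = 1U << PROCESSOR_HYPOTHETICAL;
  printf ("%#x\n", good);   /* prints 0x80000000 */
  return 0;
}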
const char* ix86_tune_feature_names[X86_TUNE_LAST] = {
#undef DEF_TUNE
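For readers unfamiliar with the idiom, arrays like ix86_tune_feature_names are typically filled in by re-expanding DEF_TUNE over x86-tune.def so that each entry contributes its name string. A hedged, self-contained sketch of that X-macro pattern follows; it is illustrative only and not a quote of the surrounding i386.c code:

/* Sketch of the X-macro idiom: each DEF_TUNE (tune, name, selector)
   line in the .def file expands to one string literal here.  */
#define DEF_TUNE(tune, name, selector) name,
static const char *tune_names_sketch[] = {
#include "x86-tune.def"   /* assumption: entries are written via DEF_TUNE */
};
#undef DEF_TUNE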
on simulation result. But after P4 was made, no performance benefit
was observed with branch hints. It also increases the code size.
As a result, icc never generates branch hints. */
-DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS, "branch_prediction_hints", 0)
+DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS, "branch_prediction_hints", 0U)
/* X86_TUNE_QIMODE_MATH: Enable use of 8bit arithmetic. */
-DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", ~0)
+DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", ~0U)
/* X86_TUNE_PROMOTE_QI_REGS: This enables generic code that promotes all 8bit
arithmetic to 32bit via PROMOTE_MODE macro. This code generation scheme
is usually used for RISC targets. */
-DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", 0)
+DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", 0U)
/* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based
on hardware capabilities. Bdver3 hardware has a loop buffer which makes