4 * Generate inline load/store functions for one MMU mode and data
7 * Generate a store function as well as signed and unsigned loads. For
8 * 32 and 64 bit cases, also generate floating point functions with
11 * Not used directly but included from softmmu_exec.h and exec-all.h.
13 * Copyright (c) 2003 Fabrice Bellard
15 * This library is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU Lesser General Public
17 * License as published by the Free Software Foundation; either
18 * version 2 of the License, or (at your option) any later version.
20 * This library is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * Lesser General Public License for more details.
25 * You should have received a copy of the GNU Lesser General Public
26 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/*
 * DATA_SIZE-dependent type selection.  The surrounding "#if DATA_SIZE == N"
 * directives are elided from this fragment; each pair below corresponds to
 * one access width (8, 4, 2, 1 bytes).  DATA_TYPE is the unsigned access
 * type; DATA_STYPE is the matching signed type used by the sign-extending
 * loads.  NOTE(review): no DATA_STYPE appears for the 64/32-bit cases in
 * this fragment — presumably defined (or unnecessary) on elided lines.
 */
31 #define DATA_TYPE uint64_t
35 #define DATA_TYPE uint32_t
39 #define DATA_TYPE uint16_t
40 #define DATA_STYPE int16_t
44 #define DATA_TYPE uint8_t
45 #define DATA_STYPE int8_t
/* Reached when DATA_SIZE is none of the supported widths. */
47 #error unsupported data size
/*
 * Map ACCESS_TYPE onto an MMU index and a slow-path helper suffix.
 * Values below NB_MMU_MODES name a specific MMU mode directly; the two
 * values above it select the current mode at run time, for data
 * (NB_MMU_MODES) and code fetch (NB_MMU_MODES + 1) accesses respectively.
 */
50 #if ACCESS_TYPE < (NB_MMU_MODES)
/* Explicit MMU mode: ACCESS_TYPE is itself the TLB/mmu index. */
52 #define CPU_MMU_INDEX ACCESS_TYPE
53 #define MMUSUFFIX _mmu
55 #elif ACCESS_TYPE == (NB_MMU_MODES)
/* Data access in the current CPU mode: query the index at run time. */
57 #define CPU_MMU_INDEX (cpu_mmu_index(env))
58 #define MMUSUFFIX _mmu
60 #elif ACCESS_TYPE == (NB_MMU_MODES + 1)
/* Code (instruction-fetch) access: use the _cmmu slow-path helpers. */
62 #define CPU_MMU_INDEX (cpu_mmu_index(env))
63 #define MMUSUFFIX _cmmu
/* NOTE(review): the "#else" line preceding this #error is elided here. */
66 #error invalid ACCESS_TYPE
/*
 * RES_TYPE is the C return type of the generated unsigned loads: uint64_t
 * for 8-byte accesses, uint32_t otherwise (sub-word results are widened).
 * The "#if DATA_SIZE == 8 / #else / #endif" lines are elided here.
 */
70 #define RES_TYPE uint64_t
72 #define RES_TYPE uint32_t
/*
 * Code fetches compare against the TLB entry's addr_code tag; data loads
 * compare against addr_read.  ADDR_READ picks the field used below.
 */
75 #if ACCESS_TYPE == (NB_MMU_MODES + 1)
76 #define ADDR_READ addr_code
78 #define ADDR_READ addr_read
81 /* generic load/store macros */
/*
 * Generated unsigned load: cpu_ld<USUFFIX><MEMSUFFIX>(env, ptr).
 * Fast path: if the TLB entry for ptr's page matches on both the page tag
 * and the low alignment bits, read directly from host memory through the
 * entry's addend; otherwise fall back to the out-of-line
 * helper_ld*<MMUSUFFIX> slow path (TLB fill / MMIO / unaligned).
 * NOTE(review): local declarations, the assignment of addr from ptr, the
 * else keyword, the return statement and closing brace are elided from
 * this fragment.
 */
83 static inline RES_TYPE
84 glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
/* Index into the direct-mapped TLB for this page. */
92     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
93     mmu_idx = CPU_MMU_INDEX;
/*
 * Tag compare: the mask keeps the page bits plus the (DATA_SIZE - 1)
 * alignment bits, so an unaligned access also fails and takes the
 * slow path.
 */
94     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
95                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
96         res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
/* TLB hit: addend converts the guest address to a host pointer. */
98         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
99         res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
/*
 * Generated signed load: cpu_lds<SUFFIX><MEMSUFFIX>(env, ptr).
 * Same TLB fast-path/slow-path structure as the unsigned load above, but
 * the result is sign-extended: the slow-path helper result is cast to
 * DATA_STYPE and the fast path uses the lds*_raw accessor.
 * NOTE(review): the return-type line, local declarations, else keyword,
 * return statement and closing brace are elided from this fragment.
 */
106 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
/* Index into the direct-mapped TLB for this page. */
113     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
114     mmu_idx = CPU_MMU_INDEX;
/* Tag compare also fails on unaligned accesses (DATA_SIZE - 1 bits). */
115     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
116                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
/* Slow path: helper returns unsigned; cast sign-extends the value. */
117         res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
118                                MMUSUFFIX)(env, addr, mmu_idx);
/* TLB hit: read through the host-address addend with the signed accessor. */
120         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
121         res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
/* Stores make no sense for code fetches, so skip them for the _cmmu case. */
127 #if ACCESS_TYPE != (NB_MMU_MODES + 1)
129 /* generic store macro */
/*
 * Generated store: cpu_st<SUFFIX><MEMSUFFIX>(env, ptr, v).
 * Mirrors the load fast path but compares against the addr_write tag;
 * on a miss it calls the helper_st*<MMUSUFFIX> slow path, on a hit it
 * writes through the host-address addend.
 * NOTE(review): the return-type line, local declarations, the else
 * keyword and the closing brace are elided from this fragment.
 */
132 glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
/* Index into the direct-mapped TLB for this page. */
140     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
141     mmu_idx = CPU_MMU_INDEX;
/* Write-tag compare; also fails on unaligned accesses. */
142     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
143                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
144         glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
/* TLB hit: store directly to host memory. */
146         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
147         glue(glue(st, SUFFIX), _raw)(hostaddr, v);
151 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
/*
 * Floating-point aliases for the 32- and 64-bit cases: same memory access
 * as the integer load/store, with the bits reinterpreted via a union (not
 * a pointer cast, avoiding strict-aliasing issues).  Not generated for
 * code fetches.  NOTE(review): the union declarations, assignments,
 * return statements and closing braces are elided from this fragment.
 */
153 #if ACCESS_TYPE != (NB_MMU_MODES + 1)
/* 64-bit: load/store a float64 through the integer cpu_ldq/cpu_stq path. */
156 static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
163     u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
167 static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
168                                              target_ulong ptr, float64 v)
175     glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
177 #endif /* DATA_SIZE == 8 */
/* 32-bit: load/store a float32 through the integer cpu_ldl/cpu_stl path. */
180 static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
187     u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
191 static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
192                                              target_ulong ptr, float32 v)
199     glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
201 #endif /* DATA_SIZE == 4 */
203 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */