4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
/* Select the unsigned access type (and, for sub-word sizes, the signed
 * type used by sign-extending loads) matching DATA_SIZE.
 * NOTE(review): the #if/#elif lines testing DATA_SIZE are elided in this
 * chunk; only the #define bodies and the final #error are visible. */
23 #define DATA_TYPE uint64_t
27 #define DATA_TYPE uint32_t
/* 16-bit accesses also define a signed type for ldsw-style loads. */
31 #define DATA_TYPE uint16_t
32 #define DATA_STYPE int16_t
/* 8-bit accesses likewise define a signed type for ldsb-style loads. */
36 #define DATA_TYPE uint8_t
37 #define DATA_STYPE int8_t
/* Any DATA_SIZE other than 1/2/4/8 is a build error. */
39 #error unsupported data size
/* Map ACCESS_TYPE onto the MMU index used for TLB lookups and onto the
 * suffix of the slow-path helper invoked on a TLB miss:
 *   ACCESS_TYPE <  NB_MMU_MODES     : fixed, compile-time MMU index; data
 *                                     access helpers (__ld/__st*_mmu).
 *   ACCESS_TYPE == NB_MMU_MODES     : MMU index taken from the current CPU
 *                                     state at run time; data access helpers.
 *   ACCESS_TYPE == NB_MMU_MODES + 1 : run-time MMU index, but CODE access —
 *                                     uses the _cmmu (code MMU) helpers.
 * Any other value is rejected at compile time. */
42 #if ACCESS_TYPE < (NB_MMU_MODES)
44 #define CPU_MMU_INDEX ACCESS_TYPE
45 #define MMUSUFFIX _mmu
47 #elif ACCESS_TYPE == (NB_MMU_MODES)
49 #define CPU_MMU_INDEX (cpu_mmu_index(env))
50 #define MMUSUFFIX _mmu
52 #elif ACCESS_TYPE == (NB_MMU_MODES + 1)
54 #define CPU_MMU_INDEX (cpu_mmu_index(env))
55 #define MMUSUFFIX _cmmu
58 #error invalid ACCESS_TYPE
/* Result type returned by the unsigned load accessors.
 * NOTE(review): the conditional choosing RES_TYPE for smaller DATA_SIZEs
 * is elided in this chunk; only the 64-bit branch is visible. */
62 #define RES_TYPE uint64_t
/* Code (instruction-fetch) accesses compare against the addr_code TLB
 * field; all other reads compare against addr_read. */
67 #if ACCESS_TYPE == (NB_MMU_MODES + 1)
68 #define ADDR_READ addr_code
70 #define ADDR_READ addr_read
/* i386-host fast path: hand-written inline-asm TLB lookup plus load,
 * enabled only for 32-bit guests, data sizes <= 4, a compile-time MMU
 * index, and when ASM_SOFTMMU is requested.
 * NOTE(review): many asm lines (page-index computation, the branch to the
 * slow-path __ld*_mmu call, the %0/%1 operand bindings) are elided in this
 * chunk; the visible fragments index the TLB via %ebp (which presumably
 * holds the CPUState pointer — confirm against the target's register
 * allocation), compare the TLB tag, add the host addend, and perform the
 * zero-extending load. */
73 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
74 (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)
76 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
80 asm volatile ("movl %1, %%edx\n"
/* %edx = &env->tlb_table[mmu_idx][page_index] (env in %ebp). */
85 "leal %5(%%edx, %%ebp), %%edx\n"
/* Compare masked guest address against the TLB tag (addr_read). */
86 "cmpl (%%edx), %%eax\n"
/* TLB hit: add the host-address addend stored in the TLB entry. */
94 "addl 12(%%edx), %%eax\n"
/* Zero-extending load of the data itself (byte / word variants). */
96 "movzbl (%%eax), %0\n"
98 "movzwl (%%eax), %0\n"
102 #error unsupported size
/* Immediate operands: TLB index mask, shift, and page/alignment mask. */
107 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
108 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
109 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
/* "m" operands keep the TLB table and the slow-path helper address
 * visible to the compiler so it cannot reorder/elide around the asm. */
110 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
112 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
113 : "%eax", "%ecx", "%edx", "memory", "cc");
/* Signed-load counterpart of the fast path above: identical TLB lookup,
 * but the final load sign-extends (movsbl/movswl) and the result is
 * returned as int.  NOTE(review): the slow-path call sequence and the
 * operand bindings between the visible fragments are elided in this
 * chunk. */
118 static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
122 asm volatile ("movl %1, %%edx\n"
/* %edx = &env->tlb_table[mmu_idx][page_index] (env in %ebp). */
127 "leal %5(%%edx, %%ebp), %%edx\n"
/* Compare masked guest address against the TLB tag. */
128 "cmpl (%%edx), %%eax\n"
138 #error unsupported size
/* TLB hit: add the host addend, then sign-extending load. */
142 "addl 12(%%edx), %%eax\n"
144 "movsbl (%%eax), %0\n"
146 "movswl (%%eax), %0\n"
148 #error unsupported size
/* Same immediate/memory operands as the unsigned fast path. */
153 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
154 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
155 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
156 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
158 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
159 : "%eax", "%ecx", "%edx", "memory", "cc");
/* Store fast path: same inline-asm TLB lookup, but compares against the
 * addr_write TLB field, uses the addend at offset 8 of the TLB entry, and
 * writes the value instead of loading.  NOTE(review): the branch to the
 * slow-path __st*_mmu helper and several operand lines are elided in this
 * chunk. */
164 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
166 asm volatile ("movl %0, %%edx\n"
/* %edx = &env->tlb_table[mmu_idx][page_index] (env in %ebp). */
171 "leal %5(%%edx, %%ebp), %%edx\n"
/* Compare masked guest address against the TLB tag (addr_write). */
172 "cmpl (%%edx), %%eax\n"
/* Slow-path argument setup: zero-extend the value into %edx.
 * NOTE(review): presumably feeds the __st helper's calling convention —
 * the call itself is elided here. */
176 "movzbl %b1, %%edx\n"
178 "movzwl %w1, %%edx\n"
182 #error unsupported size
/* TLB hit: add the host addend (offset 8 for the write path). */
188 "addl 8(%%edx), %%eax\n"
/* Store the low byte/word of the value. */
190 "movb %b1, (%%eax)\n"
192 "movw %w1, (%%eax)\n"
196 #error unsupported size
/* Immediate operands: TLB index mask, shift, and page/alignment mask. */
206 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
207 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
208 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
209 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
211 "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
212 : "%eax", "%ecx", "%edx", "memory", "cc");
217 /* generic load/store macros */

/* Portable C unsigned load: compute the TLB slot from the page number,
 * compare the stored tag (ADDR_READ, i.e. addr_read or addr_code) against
 * the address masked by page and alignment bits; on mismatch take the
 * slow path (__ld*_mmu / __ld*_cmmu), otherwise load directly from
 * host memory at guest address + TLB addend.
 * NOTE(review): the declarations of page_index/addr/res/mmu_idx, the
 * opening/closing braces and the return statement are elided in this
 * chunk; only the core lookup logic is visible. */
219 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
224 unsigned long physaddr;
228 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
229 mmu_idx = CPU_MMU_INDEX;
/* Tag mismatch also fires for unaligned accesses, because the
 * (DATA_SIZE - 1) bits are folded into the comparison mask. */
230 if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
231 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
232 res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
/* TLB hit: addend converts the guest address to a host pointer. */
234 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
235 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
/* Portable C signed load: identical TLB lookup to the unsigned variant,
 * but the slow-path result is cast through DATA_STYPE to sign-extend,
 * and the fast path uses the lds*_raw accessor.
 * NOTE(review): local declarations, braces and the return statement are
 * elided in this chunk. */
241 static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
245 unsigned long physaddr;
249 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
250 mmu_idx = CPU_MMU_INDEX;
251 if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
252 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
/* Slow path returns the unsigned value; cast sign-extends it. */
253 res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
254 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
256 res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
/* Stores make no sense for code (instruction-fetch) accesses, so the
 * whole store accessor is compiled out in _cmmu mode. */
262 #if ACCESS_TYPE != (NB_MMU_MODES + 1)

264 /* generic store macro */

/* Portable C store: same TLB-slot computation as the loads, but compared
 * against the addr_write tag; slow path calls __st*_mmu, fast path writes
 * through the host pointer with st*_raw.
 * NOTE(review): local declarations and closing braces are elided in this
 * chunk. */
266 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
270 unsigned long physaddr;
274 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
275 mmu_idx = CPU_MMU_INDEX;
276 if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
277 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
278 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
/* TLB hit: write directly to host memory. */
280 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
281 glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);

285 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
/* Floating-point accessors: type-pun float64<->uint64 and float32<->uint32
 * through a union and delegate to the integer accessors above.  Also
 * compiled out for code accesses.
 * NOTE(review): the union declarations and return statements inside each
 * wrapper are elided in this chunk; the DATA_SIZE == 8 / == 4 guards are
 * visible only via the #endif comments. */
289 #if ACCESS_TYPE != (NB_MMU_MODES + 1)

/* 64-bit float load/store via the ldq/stq integer accessors. */
292 static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
298 u.i = glue(ldq, MEMSUFFIX)(ptr);
302 static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v)
309 glue(stq, MEMSUFFIX)(ptr, u.i);
311 #endif /* DATA_SIZE == 8 */

/* 32-bit float load/store via the ldl/stl integer accessors. */
314 static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr)
320 u.i = glue(ldl, MEMSUFFIX)(ptr);
324 static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
331 glue(stl, MEMSUFFIX)(ptr, u.i);
333 #endif /* DATA_SIZE == 4 */

335 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */