for(;;) {
tb1 = *ptb;
- n1 = (long)tb1 & 3;
- tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ n1 = (uintptr_t)tb1 & 3;
+ tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
if (tb1 == tb) {
*ptb = tb1->page_next[n1];
break;
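/* The recurring "& 3" / "& ~3" pattern throughout this patch is low-bit
   pointer tagging: TranslationBlock pointers are at least 4-byte aligned,
   so the two low bits are free to carry the page index n (0 or 1) or the
   value 2 that marks the end of a jump list. Casting through long breaks
   on LLP64 hosts such as Win64, where long is 32 bits but pointers are 64;
   uintptr_t is the integer type C99 provides to round-trip a pointer.
   A minimal sketch of the idiom; tag_ptr/ptr_tag/untag_ptr are
   illustrative names, not from the patch. */
#include <assert.h>
#include <stdint.h>

static inline void *tag_ptr(void *p, unsigned tag)
{
    assert(((uintptr_t)p & 3) == 0 && tag < 4); /* alignment frees 2 bits */
    return (void *)((uintptr_t)p | tag);
}

static inline unsigned ptr_tag(void *p)
{
    return (uintptr_t)p & 3;                    /* recover the tag */
}

static inline void *untag_ptr(void *p)
{
    return (void *)((uintptr_t)p & ~(uintptr_t)3); /* recover the pointer */
}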
/* find tb(n) in circular list */
for(;;) {
tb1 = *ptb;
- n1 = (long)tb1 & 3;
- tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ n1 = (uintptr_t)tb1 & 3;
+ tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
if (n1 == n && tb1 == tb)
break;
if (n1 == 2) {
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
- tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
+ tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* suppress any remaining jumps to this TB */
tb1 = tb->jmp_first;
for(;;) {
- n1 = (long)tb1 & 3;
+ n1 = (uintptr_t)tb1 & 3;
if (n1 == 2)
break;
- tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
tb2 = tb1->jmp_next[n1];
tb_reset_jump(tb1, n1);
tb1->jmp_next[n1] = NULL;
tb1 = tb2;
}
- tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
+ tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
tb_phys_invalidate_count++;
}
tb = p->first_tb;
while (tb != NULL) {
- n = (long)tb & 3;
- tb = (TranslationBlock *)((long)tb & ~3);
+ n = (uintptr_t)tb & 3;
+ tb = (TranslationBlock *)((uintptr_t)tb & ~3);
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
   it is not a problem */
tb->flags = flags;
tb->cflags = cflags;
cpu_gen_code(env, tb, &code_gen_size);
- code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
+ code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
+ CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
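/* The rounding expression above is the usual power-of-two align-up idiom:
   adding CODE_GEN_ALIGN - 1 and masking the low bits bumps code_gen_ptr to
   the next CODE_GEN_ALIGN boundary, and uintptr_t keeps the arithmetic
   pointer-width on LLP64 hosts. A sketch; align_up is a hypothetical
   helper, the patch keeps the expression inline. */
#include <stdint.h>

static inline void *align_up(void *p, uintptr_t align)
{
    /* valid only when align is a power of two */
    return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}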
/* XXX: see if in some cases it could be faster to invalidate all the code */
tb = p->first_tb;
while (tb != NULL) {
- n = (long)tb & 3;
- tb = (TranslationBlock *)((long)tb & ~3);
+ n = (uintptr_t)tb & 3;
+ tb = (TranslationBlock *)((uintptr_t)tb & ~3);
tb_next = tb->page_next[n];
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
cpu_single_env->mem_io_vaddr, len,
cpu_single_env->eip,
- cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
+ cpu_single_env->eip +
+ (intptr_t)cpu_single_env->segs[R_CS].base);
}
#endif
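/* An aside on the log call above: once an argument is cast to intptr_t or
   uintptr_t, the portable printf specifiers are PRIdPTR/PRIxPTR from
   <inttypes.h>; the patch itself leaves the existing %x/%08x conversions
   untouched. A self-contained sketch, not code from the patch: */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uintptr_t base = (uintptr_t)&base;          /* any pointer-sized value */
    printf("base=0x%" PRIxPTR "\n", base);      /* right width on all hosts */
    return 0;
}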
p = page_find(start >> TARGET_PAGE_BITS);
}
#endif
while (tb != NULL) {
- n = (long)tb & 3;
- tb = (TranslationBlock *)((long)tb & ~3);
+ n = (uintptr_t)tb & 3;
+ tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(current_tb->cflags & CF_COUNT_MASK) != 1) {
#ifndef CONFIG_USER_ONLY
page_already_protected = p->first_tb != NULL;
#endif
- p->first_tb = (TranslationBlock *)((long)tb | n);
+ p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
invalidate_page_bitmap(p);
#if defined(TARGET_HAS_SMC) || 1
else
tb->page_addr[1] = -1;
- tb->jmp_first = (TranslationBlock *)((long)tb | 2);
+ tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
tb->jmp_next[0] = NULL;
tb->jmp_next[1] = NULL;
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
int m_min, m_max, m;
- unsigned long v;
+ uintptr_t v;
TranslationBlock *tb;
if (nb_tbs <= 0)
return NULL;
- if (tc_ptr < (unsigned long)code_gen_buffer ||
- tc_ptr >= (unsigned long)code_gen_ptr)
+ if (tc_ptr < (uintptr_t)code_gen_buffer ||
+ tc_ptr >= (uintptr_t)code_gen_ptr) {
return NULL;
+ }
/* binary search (cf Knuth) */
m_min = 0;
m_max = nb_tbs - 1;
while (m_min <= m_max) {
m = (m_min + m_max) >> 1;
tb = &tbs[m];
- v = (unsigned long)tb->tc_ptr;
+ v = (uintptr_t)tb->tc_ptr;
if (v == tc_ptr)
return tb;
else if (tc_ptr < v) {
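/* tb_find_pc maps a host code address (e.g. a fault PC inside generated
   code) back to its TranslationBlock. TBs are carved out of code_gen_buffer
   in allocation order, so tbs[].tc_ptr is sorted and the lookup can binary
   search for the greatest tc_ptr at or below the target. A standalone
   sketch of that search shape under the same sortedness assumption;
   find_le is a hypothetical name: */
#include <stdint.h>

static int find_le(const uintptr_t *starts, int n, uintptr_t pc)
{
    int lo = 0, hi = n - 1;
    while (lo <= hi) {
        int m = (lo + hi) >> 1;
        if (starts[m] == pc)
            return m;
        if (pc < starts[m])
            hi = m - 1;
        else
            lo = m + 1;
    }
    return hi; /* greatest index with starts[index] <= pc; -1 if none */
}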
if (tb1 != NULL) {
/* find head of list */
for(;;) {
- n1 = (long)tb1 & 3;
- tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ n1 = (uintptr_t)tb1 & 3;
+ tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
if (n1 == 2)
break;
tb1 = tb1->jmp_next[n1];
ptb = &tb_next->jmp_first;
for(;;) {
tb1 = *ptb;
- n1 = (long)tb1 & 3;
- tb1 = (TranslationBlock *)((long)tb1 & ~3);
+ n1 = (uintptr_t)tb1 & 3;
+ tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
if (n1 == n && tb1 == tb)
break;
ptb = &tb1->jmp_next[n1];
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
- unsigned long start, unsigned long length)
+ uintptr_t start, uintptr_t length)
{
- unsigned long addr;
+ uintptr_t addr;
if (tlb_is_dirty_ram(tlb_entry)) {
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if ((addr - start) < length) {
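/* The comparison above is a one-test range check: with unsigned operands,
   (addr - start) < length holds exactly when start <= addr < start + length,
   because an addr below start wraps to a huge value. A sketch; in_range is
   a hypothetical name: */
#include <stdbool.h>
#include <stdint.h>

static inline bool in_range(uintptr_t addr, uintptr_t start, uintptr_t length)
{
    /* assumes start + length does not wrap */
    return (addr - start) < length;
}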
int dirty_flags)
{
CPUArchState *env;
- unsigned long length, start1;
+ uintptr_t length, start1;
int i;
start &= TARGET_PAGE_MASK;
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
- start1 = (unsigned long)qemu_safe_ram_ptr(start);
+ start1 = (uintptr_t)qemu_safe_ram_ptr(start);
/* Check that we don't span multiple blocks - this breaks the
address comparisons below. */
- if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
+ if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
!= (end - 1) - start) {
abort();
}
void *p;
if (tlb_is_dirty_ram(tlb_entry)) {
- p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+ p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+ tlb_entry->addend);
ram_addr = qemu_ram_addr_from_host_nofail(p);
if (!cpu_physical_memory_is_dirty(ram_addr)) {
unsigned int index;
target_ulong address;
target_ulong code_address;
- unsigned long addend;
+ uintptr_t addend;
CPUTLBEntry *te;
CPUWatchpoint *wp;
target_phys_addr_t iotlb;
address |= TLB_MMIO;
}
if (is_ram_rom_romd(section)) {
- addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
+ addend = (uintptr_t)memory_region_get_ram_ptr(section->mr)
+ section_addr(section, paddr);
} else {
addend = 0;
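/* The addend computed above is the core of the software TLB fast path: it
   stores host_page_address - guest_page_address once per TLB fill, so a
   later access translates with a single add, as in the addr computation in
   tlb_reset_dirty_range earlier. A deliberately simplified sketch that
   assumes guest and host addresses share a width; the struct and names are
   illustrative, not QEMU's: */
#include <stdint.h>

typedef struct {
    uintptr_t guest_page;  /* guest virtual page address (simplified) */
    uintptr_t addend;      /* host_page - guest_page, pointer-sized */
} tlb_entry_sketch;

static inline void *guest_to_host(const tlb_entry_sketch *e, uintptr_t vaddr)
{
    /* a real TLB first checks that vaddr's page matches e->guest_page */
    return (void *)(vaddr + e->addend);  /* one add per access */
}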
{
walk_memory_regions_fn fn;
void *priv;
- unsigned long start;
+ uintptr_t start;
int prot;
};
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
struct walk_memory_regions_data data;
- unsigned long i;
+ uintptr_t i;
data.fn = fn;
data.priv = priv;
}
static inline void tlb_set_dirty(CPUArchState *env,
- unsigned long addr, target_ulong vaddr)
+ uintptr_t addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */