From: Richard Henderson
Date: Wed, 10 Mar 2010 23:39:07 +0000 (-0800)
Subject: linux-user: Fix mmap_find_vma returning invalid addresses.
X-Git-Tag: TizenStudio_2.0_p2.3.2~208^2~8481
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=14f24e1465edc44b9b4d89fbbea66e06088154e1;p=sdk%2Femulator%2Fqemu.git

linux-user: Fix mmap_find_vma returning invalid addresses.

Don't return addresses that aren't properly aligned for the guest,
e.g. when the guest has a larger page size than the host.  Don't
return addresses that are outside the virtual address space for the
target, by paying proper attention to the h2g/g2h macros.

At the same time, place the default mapping base for 64-bit guests
(on 64-bit hosts) outside the low 4G.  Consistently interpret
mmap_next_start in the guest address space.

Signed-off-by: Richard Henderson
---

diff --git a/linux-user/main.c b/linux-user/main.c
index eeae22e..4614e3c 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -39,8 +39,8 @@ char *exec_path;
 
 int singlestep;
 
-#if defined(CONFIG_USE_GUEST_BASE)
 unsigned long mmap_min_addr;
+#if defined(CONFIG_USE_GUEST_BASE)
 unsigned long guest_base;
 int have_guest_base;
 #endif
@@ -2812,16 +2812,14 @@ int main(int argc, char **argv, char **envp)
      * proper page alignment for guest_base.
      */
    guest_base = HOST_PAGE_ALIGN(guest_base);
+#endif /* CONFIG_USE_GUEST_BASE */
 
     /*
      * Read in mmap_min_addr kernel parameter.  This value is used
      * When loading the ELF image to determine whether guest_base
-     * is needed.
-     *
-     * When user has explicitly set the quest base, we skip this
-     * test.
+     * is needed.  It is also used in mmap_find_vma.
      */
-    if (!have_guest_base) {
+    {
         FILE *fp;
 
         if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
@@ -2833,7 +2831,6 @@ int main(int argc, char **argv, char **envp)
             fclose(fp);
         }
     }
-#endif /* CONFIG_USE_GUEST_BASE */
 
     /*
      * Prepare copy of argv vector for target.
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 65fdc33..ad00b6f 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -264,12 +264,15 @@ static int mmap_frag(abi_ulong real_start,
     return 0;
 }
 
-#if defined(__CYGWIN__)
+#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
+# define TASK_UNMAPPED_BASE (1ul << 38)
+#elif defined(__CYGWIN__)
 /* Cygwin doesn't have a whole lot of address space.  */
-static abi_ulong mmap_next_start = 0x18000000;
+# define TASK_UNMAPPED_BASE 0x18000000
 #else
-static abi_ulong mmap_next_start = 0x40000000;
+# define TASK_UNMAPPED_BASE 0x40000000
 #endif
+static abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
 
 unsigned long last_brk;
 
@@ -281,19 +284,24 @@ unsigned long last_brk;
  */
 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
 {
-    void *ptr;
+    void *ptr, *prev;
     abi_ulong addr;
-
-    size = HOST_PAGE_ALIGN(size);
-    start &= qemu_host_page_mask;
+    int wrapped, repeat;
 
     /* If 'start' == 0, then a default start address is used. */
-    if (start == 0)
+    if (start == 0) {
         start = mmap_next_start;
+    } else {
+        start &= qemu_host_page_mask;
+    }
+
+    size = HOST_PAGE_ALIGN(size);
 
     addr = start;
+    wrapped = repeat = 0;
+    prev = 0;
 
-    for(;;) {
+    for (;; prev = ptr) {
         /*
          * Reserve needed memory area to avoid a race.
          * It should be discarded using:
@@ -301,31 +309,77 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
          *  - mremap() with MREMAP_FIXED flag
          *  - shmat() with SHM_REMAP flag
          */
-        ptr = mmap((void *)(unsigned long)addr, size, PROT_NONE,
+        ptr = mmap(g2h(addr), size, PROT_NONE,
                    MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
 
         /* ENOMEM, if host address space has no memory */
-        if (ptr == MAP_FAILED)
+        if (ptr == MAP_FAILED) {
             return (abi_ulong)-1;
+        }
 
-        /* If address fits target address space we've found what we need */
-        if ((unsigned long)ptr + size - 1 <= (abi_ulong)-1)
-            break;
+        /* Count the number of sequential returns of the same address.
+           This is used to modify the search algorithm below.  */
+        repeat = (ptr == prev ? repeat + 1 : 0);
 
-        /* Unmap and try again with new page */
+        if (h2g_valid(ptr + size - 1)) {
+            addr = h2g(ptr);
+
+            if ((addr & ~TARGET_PAGE_MASK) == 0) {
+                /* Success.  */
+                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
+                    mmap_next_start = addr + size;
+                }
+                return addr;
+            }
+
+            /* The address is not properly aligned for the target.  */
+            switch (repeat) {
+            case 0:
+                /* Assume the result that the kernel gave us is the
+                   first with enough free space, so start again at the
+                   next higher target page.  */
+                addr = TARGET_PAGE_ALIGN(addr);
+                break;
+            case 1:
+                /* Sometimes the kernel decides to perform the allocation
+                   at the top end of memory instead.  */
+                addr &= TARGET_PAGE_MASK;
+                break;
+            case 2:
+                /* Start over at low memory.  */
+                addr = 0;
+                break;
+            default:
+                /* Fail.  This unaligned block must be the last.  */
+                addr = -1;
+                break;
+            }
+        } else {
+            /* Since the result the kernel gave didn't fit, start
+               again at low memory.  If any repetition, fail.  */
+            addr = (repeat ? -1 : 0);
+        }
+
+        /* Unmap and try again.  */
         munmap(ptr, size);
-        addr += qemu_host_page_size;
 
-        /* ENOMEM if we check whole of target address space */
-        if (addr == start)
+        /* ENOMEM if we checked the whole of the target address space.  */
+        if (addr == -1ul) {
             return (abi_ulong)-1;
+        } else if (addr == 0) {
+            if (wrapped) {
+                return (abi_ulong)-1;
+            }
+            wrapped = 1;
+            /* Don't actually use 0 when wrapping, instead indicate
+               that we'd truly like an allocation in low memory.  */
+            addr = (mmap_min_addr > TARGET_PAGE_SIZE
+                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
+                    : TARGET_PAGE_SIZE);
+        } else if (wrapped && addr >= start) {
+            return (abi_ulong)-1;
+        }
     }
-
-    /* Update default start address */
-    if (start == mmap_next_start)
-        mmap_next_start = (unsigned long)ptr + size;
-
-    return h2g(ptr);
 }
 
 /* NOTE: all the constants are the HOST ones */
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index d129deb..6ab9517 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -133,9 +133,7 @@ void init_task_state(TaskState *ts);
 void task_settid(TaskState *);
 void stop_all_tasks(void);
 extern const char *qemu_uname_release;
-#if defined(CONFIG_USE_GUEST_BASE)
 extern unsigned long mmap_min_addr;
-#endif
 
 /* ??? See if we can avoid exposing so much of the loader internals.  */
 /*
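The core of the new loop is the pair of checks made after each speculative
mmap(): does the reserved block map back into the guest address space at all
(h2g_valid), and is the resulting guest address aligned to the guest page
size.  The standalone sketch below illustrates both checks; the simplified
h2g()/h2g_valid() stand-ins, the guest_base value, the 8K guest page size and
the 32-bit guest limit are assumptions chosen for illustration on a 64-bit
host, not values taken from the patch.

/*
 * Standalone sketch, not part of the patch: simplified stand-ins for
 * QEMU's h2g()/h2g_valid(), with an assumed guest_base and an assumed
 * 32-bit guest that uses 8K pages on a 4K-page, 64-bit host.
 */
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE  8192ull                 /* guest pages larger than host's 4K */
#define TARGET_PAGE_MASK  (~(TARGET_PAGE_SIZE - 1))

static const uint64_t guest_base = 0x10000000000ull;   /* assumed placement */

/* Host pointer -> guest address: modelled as an offset from guest_base. */
static uint64_t h2g(const void *host)
{
    return (uint64_t)(uintptr_t)host - guest_base;
}

/* Would this host address map back inside the 32-bit guest address space? */
static int h2g_valid(const void *host)
{
    return h2g(host) <= 0xffffffffull;
}

int main(void)
{
    /* Pretend the host kernel returned this block for a 64K request. */
    const void *ptr = (const void *)(uintptr_t)(guest_base + 0x21000);
    uint64_t size = 0x10000;

    if (!h2g_valid((const char *)ptr + size - 1)) {
        puts("block lies outside the guest address space: retry lower");
    } else {
        uint64_t addr = h2g(ptr);
        if ((addr & ~TARGET_PAGE_MASK) == 0) {
            printf("usable guest address 0x%llx\n", (unsigned long long)addr);
        } else {
            /* Host-page aligned but not guest-page aligned: 0x21000 % 0x2000 != 0. */
            printf("0x%llx is not guest-page aligned: realign and retry\n",
                   (unsigned long long)addr);
        }
    }
    return 0;
}

In mmap_find_vma() itself a rejected block is of course not reported but
unmapped, after which the loop retries at a realigned address, restarts at low
memory, or gives up with ENOMEM, as in the hunk above.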