/*
 * Copyright (c) 2011 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "native_client/src/untrusted/irt/irt.h"
#include "native_client/src/untrusted/irt/irt_private.h"
#include "native_client/src/untrusted/nacl/syscall_bindings_trampoline.h"
16 static int nacl_irt_sysbrk(void **newbrk) {
18 * The syscall does not actually indicate error. It just returns the
19 * new current value, which is unchanged if something went wrong.
20 * But if the requested value was below the end of the data segment,
21 * the new value will be greater, but this is not "going wrong".
22 * Here we just approximate a saner interface: you get what you requested,
23 * you did a "probe" request passing NULL in, or it's an error.
24 * TODO(mcgrathr): this interface should just go away!!
26 void *requested = *newbrk;
27 void *got = NACL_SYSCALL(brk)(requested);
29 if (got == requested || requested == NULL) {
37 static int nacl_irt_mmap(void **addr, size_t len,
38 int prot, int flags, int fd, off_t off) {
40 * We currently do not allow PROT_EXEC to be called without MAP_FIXED, but
41 * potentially we could call allocate_code_data at this point and turn it
42 * into a MAP_FIXED call. When we do this we probably should be able to hold
43 * the code/data allocation mutex to minimize fragmentation.
45 uint32_t rv = (uintptr_t) NACL_SYSCALL(mmap)(*addr, len, prot, flags,
47 if ((uint32_t) rv > 0xffff0000u)
49 *addr = (void *) (uintptr_t) rv;
52 * When PROT_EXEC flag is set, we must coordinate code segments with
53 * code/data allocations so they are interlocked. This is safe to do at the
54 * end because of the constraint that code allocations must be mapped with
55 * MAP_FIXED. MAP_FIXED is specified to discard any already mapped pages if an
56 * already mapped page overlaps with the set of pages being mapped. This means
57 * if 2 threads happen to overlap a set of pages through both mmap and
58 * nacl_irt_code_data_allocate simultenously, it does not introduce any extra
59 * race conditions as having mapped them both sequentially. The code blocks
60 * must both already be coordinating through the code/data allocation
61 * functions, or through some other external means.
63 * For example, if mmap and nacl_irt_code_data_allocate were both called
64 * simultaneously, 3 conditions could occur:
65 * 1. If one of the calls fails, the failed function will not have mapped
66 * anything so the other one will simply succeed. Note this section
67 * which calls irt_reserve_code_allocation is only done on the success
69 * 2. If both succeed, but nacl_irt_code_data_allocate allocates the code
70 * address before the NACL_SYSCALL(mmap) call or after
71 * the reserve code allocation call, it is not really interwoven
72 * this case is equivalent to simple sequential calls.
73 * 3. If both succeed, but nacl_irt_code_data_allocate allocates the code
74 * address after the NACL_SYSCALL(mmap) but before the reserve code
75 * allocation call, the reserve code allocation call could be too late
76 * and attempt to reserve what the code/data allocation happened to
77 * return. But because the mmap call was mapped with MAP_FIXED, it would
78 * not have made a difference. It is as if someone called
79 * nacl_irt_code_data_allocate first, then called the mmap with MAP_FIXED
80 * at the same location.
82 if (prot & PROT_EXEC) {
83 irt_reserve_code_allocation((uintptr_t) *addr, len);
/*
 * mmap from nacl-irt-memory-0.1 interface should ignore PROT_EXEC bit for
 * backward-compatibility reasons.
 */
static int nacl_irt_mmap_v0_1(void **addr, size_t len,
                              int prot, int flags, int fd, off_t off) {
  /* Strip PROT_EXEC, then defer to the current mmap implementation. */
  return nacl_irt_mmap(addr, len, prot & ~PROT_EXEC, flags, fd, off);
}
97 static int nacl_irt_munmap(void *addr, size_t len) {
98 return -NACL_SYSCALL(munmap)(addr, len);
101 static int nacl_irt_mprotect(void *addr, size_t len, int prot) {
102 return -NACL_SYSCALL(mprotect)(addr, len, prot);
105 const struct nacl_irt_memory_v0_1 nacl_irt_memory_v0_1 = {
111 const struct nacl_irt_memory_v0_2 nacl_irt_memory_v0_2 = {
118 const struct nacl_irt_memory nacl_irt_memory = {