From 43e232fa1613e19aad0eff63b51f20facb9bfe17 Mon Sep 17 00:00:00 2001 From: ivmai Date: Sat, 19 Feb 2011 20:10:41 +0000 Subject: [PATCH] 2011-02-19 Ivan Maidanski * src/atomic_ops.c: Include sys/time.h (to get timespec) for NaCl. * src/atomic_ops_malloc.c (msb): Do the shift by 32 only once (in a conditional expression) to prevent a compiler warning. * src/atomic_ops_malloc.c: Expand all tabs to spaces; remove trailing spaces at EOLn. --- ChangeLog | 8 +++++ src/atomic_ops.c | 1 + src/atomic_ops_malloc.c | 89 +++++++++++++++++++++++++------------------------ 3 files changed, 55 insertions(+), 43 deletions(-) diff --git a/ChangeLog b/ChangeLog index cd06a13..1b16707 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,13 @@ 2011-02-19 Ivan Maidanski + * src/atomic_ops.c: Include sys/time.h (to get timespec) for NaCl. + * src/atomic_ops_malloc.c (msb): Do the shift by 32 only once (in + a conditional expression) to prevent a compiler warning. + * src/atomic_ops_malloc.c: Expand all tabs to spaces; remove + trailing spaces at EOLn. + +2011-02-19 Ivan Maidanski + * src/atomic_ops.c: Explicitly define AO_USE_NO_SIGNALS and AO_USE_NANOSLEEP for NaCl. diff --git a/src/atomic_ops.c b/src/atomic_ops.c index 66fba0d..5588e95 100644 --- a/src/atomic_ops.c +++ b/src/atomic_ops.c @@ -56,6 +56,7 @@ #ifdef AO_USE_NANOSLEEP /* This requires _POSIX_TIMERS feature. */ +# include <sys/time.h> # include <time.h> #elif defined(AO_USE_WIN32_PTHREADS) # include <windows.h> /* for Sleep() */ diff --git a/src/atomic_ops_malloc.c b/src/atomic_ops_malloc.c index dff4908..4c08269 100644 --- a/src/atomic_ops_malloc.c +++ b/src/atomic_ops_malloc.c @@ -1,11 +1,11 @@ -/* +/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. * Original Author: Hans Boehm * * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. 
- * + * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the @@ -18,7 +18,7 @@ #define AO_REQUIRE_CAS #include "atomic_ops_stack.h" -#include <string.h> /* for ffs, which is assumed reentrant. */ +#include <string.h> /* for ffs, which is assumed reentrant. */ #include <stdlib.h> #ifdef AO_TRACE_MALLOC # include <stdio.h> @@ -31,8 +31,8 @@ * We keep one stack of free objects for each size. Each object * has an initial word (offset -sizeof(AO_t) from the visible pointer) * which contains either - * The binary log of the object size in bytes (small objects) - * The object size (a multiple of CHUNK_SIZE) for large objects. + * The binary log of the object size in bytes (small objects) + * The object size (a multiple of CHUNK_SIZE) for large objects. * The second case only arises if mmap-based allocation is supported. * We align the user-visible part of each object on a GRANULARITY * byte boundary. That means that the actual (hidden) start of @@ -41,12 +41,12 @@ #ifndef LOG_MAX_SIZE # define LOG_MAX_SIZE 16 - /* We assume that 2**LOG_MAX_SIZE is a multiple of page size. */ + /* We assume that 2**LOG_MAX_SIZE is a multiple of page size. */ #endif #ifndef ALIGNMENT # define ALIGNMENT 16 - /* Assumed to be at least sizeof(AO_t). */ + /* Assumed to be at least sizeof(AO_t). 
*/ #endif #define CHUNK_SIZE (1 << LOG_MAX_SIZE) @@ -83,15 +83,15 @@ static char *get_mmaped(size_t sz) if (!mmap_enabled) return 0; # if defined(MAP_ANONYMOUS) result = mmap(0, sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); # elif defined(MAP_ANON) result = mmap(0, sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON, -1, 0); + MAP_PRIVATE | MAP_ANON, -1, 0); # else { int zero_fd = open("/dev/zero", O_RDONLY); result = mmap(0, sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE, zero_fd, 0); + MAP_PRIVATE, zero_fd, 0); close(zero_fd); } # endif @@ -99,15 +99,15 @@ static char *get_mmaped(size_t sz) return result; } -/* Allocate an object of size (incl. header) of size > CHUNK_SIZE. */ -/* sz includes space for an AO_t-sized header. */ +/* Allocate an object of size (incl. header) of size > CHUNK_SIZE. */ +/* sz includes space for an AO_t-sized header. */ static char * AO_malloc_large(size_t sz) { char * result; - /* The header will force us to waste ALIGNMENT bytes, incl. header. */ + /* The header will force us to waste ALIGNMENT bytes, incl. header. */ sz += ALIGNMENT; - /* Round to multiple of CHUNK_SIZE. */ + /* Round to multiple of CHUNK_SIZE. */ sz = (sz + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1); result = get_mmaped(sz); if (result == 0) return 0; @@ -123,7 +123,7 @@ AO_free_large(char * p) if (munmap(p - ALIGNMENT, (size_t)sz) != 0) abort(); /* Programmer error. Not really async-signal-safe, but ... */ } - + #else /* No MMAP */ @@ -161,38 +161,38 @@ get_chunk(void) retry: initial_ptr = (char *)AO_load(&initial_heap_ptr); my_chunk_ptr = (char *)(((AO_t)initial_ptr + (ALIGNMENT - 1)) - & ~(ALIGNMENT - 1)); + & ~(ALIGNMENT - 1)); if (initial_ptr != my_chunk_ptr) { - /* Align correctly. If this fails, someone else did it for us. */ + /* Align correctly. If this fails, someone else did it for us. 
*/ AO_compare_and_swap_acquire(&initial_heap_ptr, (AO_t)initial_ptr, - (AO_t)my_chunk_ptr); + (AO_t)my_chunk_ptr); } my_lim = my_chunk_ptr + CHUNK_SIZE; if (my_lim <= initial_heap_lim) { if (!AO_compare_and_swap(&initial_heap_ptr, (AO_t)my_chunk_ptr, - (AO_t)my_lim)) + (AO_t)my_lim)) goto retry; return my_chunk_ptr; } - /* We failed. The initial heap is used up. */ + /* We failed. The initial heap is used up. */ my_chunk_ptr = get_mmaped(CHUNK_SIZE); assert (!((AO_t)my_chunk_ptr & (ALIGNMENT-1))); return my_chunk_ptr; } -/* Object free lists. Ith entry corresponds to objects */ -/* of total size 2**i bytes. */ +/* Object free lists. Ith entry corresponds to objects */ +/* of total size 2**i bytes. */ AO_stack_t AO_free_list[LOG_MAX_SIZE+1]; -/* Chunk free list, linked through first word in chunks. */ -/* All entries of size CHUNK_SIZE. */ +/* Chunk free list, linked through first word in chunks. */ +/* All entries of size CHUNK_SIZE. */ AO_stack_t AO_chunk_free_list; -/* Break up the chunk, and add it to the object free list for */ -/* the given size. Sz must be a power of two. */ -/* We have exclusive access to chunk. */ +/* Break up the chunk, and add it to the object free list for */ +/* the given size. Sz must be a power of two. */ +/* We have exclusive access to chunk. */ static void add_chunk_as(void * chunk, size_t sz, unsigned log_sz) { @@ -208,30 +208,33 @@ add_chunk_as(void * chunk, size_t sz, unsigned log_sz) static int msbs[16] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4}; -/* Return the position of the most significant set bit in the */ -/* argument. */ -/* We follow the conventions of ffs(), i.e. the least */ -/* significant bit is number one. */ +/* Return the position of the most significant set bit in the */ +/* argument. */ +/* We follow the conventions of ffs(), i.e. the least */ +/* significant bit is number one. 
*/ int msb(size_t s) { int result = 0; + int v; if ((s & 0xff) != s) { - /* The following shift often generates warnings on 32-bit arch's */ - /* That's OK, because it will never be executed there. */ - if (sizeof(size_t) > 4 && (s >> 32) != 0) + /* The following shift often generates warnings on 32-bit arch's */ + /* That's OK, because it will never be executed there. */ + /* Doing the shift only in a conditional expression suppresses the */ + /* warning with the modern compilers. */ + if (sizeof(size_t) > 4 && (v = s >> 32) != 0) { - s >>= 32; - result += 32; + s = v; + result += 32; } if ((s >> 16) != 0) { - s >>= 16; - result += 16; + s >>= 16; + result += 16; } if ((s >> 8) != 0) { - s >>= 8; - result += 8; + s >>= 8; + result += 8; } } if (s > 15) @@ -263,7 +266,7 @@ AO_malloc(size_t sz) *result = log_sz; # ifdef AO_TRACE_MALLOC fprintf(stderr, "%x: AO_malloc(%lu) = %p\n", - (int)pthread_self(), (unsigned long)sz, result+1); + (int)pthread_self(), (unsigned long)sz, result+1); # endif return result + 1; } @@ -278,8 +281,8 @@ AO_free(void *p) log_sz = *(AO_t *)base; # ifdef AO_TRACE_MALLOC fprintf(stderr, "%x: AO_free(%p sz:%lu)\n", (int)pthread_self(), p, - (unsigned long) - (log_sz > LOG_MAX_SIZE? log_sz : (1 << log_sz))); + (unsigned long) + (log_sz > LOG_MAX_SIZE? log_sz : (1 << log_sz))); # endif if (log_sz > LOG_MAX_SIZE) AO_free_large(p); -- 2.7.4