/* Load that object's dependencies. */
_dl_map_object_deps (new, NULL, 0, 0);
- /* Increment the open count for all dependencies. */
- for (i = 0; i < new->l_searchlist.r_nlist; ++i)
- ++new->l_searchlist.r_list[i]->l_opencount;
-
/* So far, so good. Now check the versions. */
for (i = 0; i < new->l_searchlist.r_nlist; ++i)
if (new->l_searchlist.r_list[i]->l_versions == NULL)
l = l->l_prev;
}
+ /* Increment the open count for all dependencies. */
+ for (i = 0; i < new->l_searchlist.r_nlist; ++i)
+ ++new->l_searchlist.r_list[i]->l_opencount;
+
/* Run the initializer functions of new objects. */
_dl_init (new, __libc_argc, __libc_argv, __environ);
{
int i;
- /* Increment open counters for all objects which did not get
- correctly loaded. */
+ /* Increment open counters for all objects, since this has
+ not happened yet. */
for (i = 0; i < args.map->l_searchlist.r_nlist; ++i)
- if (args.map->l_searchlist.r_list[i]->l_opencount == 0)
- args.map->l_searchlist.r_list[i]->l_opencount = 1;
+ ++args.map->l_searchlist.r_list[i]->l_opencount;
_dl_close (args.map);
}
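Both dlopen hunks enforce the same invariant: every object in the new map's search list has its l_opencount raised exactly once before _dl_close walks the list and decrements it, whether loading succeeded or failed. A minimal sketch of that pairing, with hypothetical types standing in for the real link_map machinery:

    /* Hypothetical, simplified model of the open-count pairing; the
       real link_map bookkeeping is far more involved.  */
    #include <stdio.h>

    struct obj { const char *name; int opencount; };

    static void close_all (struct obj **list, int n)
    {
      /* _dl_close analogue: decrement, unload at zero.  */
      for (int i = 0; i < n; ++i)
        if (--list[i]->opencount == 0)
          printf ("unloading %s\n", list[i]->name);
    }

    int main (void)
    {
      struct obj a = { "liba.so", 0 }, b = { "libb.so", 0 };
      struct obj *deps[] = { &a, &b };

      /* Failure path: loading aborted before the counts were bumped,
         so bump them all unconditionally, as the patch now does;
         otherwise close_all's decrement would underflow.  */
      for (int i = 0; i < 2; ++i)
        ++deps[i]->opencount;
      close_all (deps, 2);
      return 0;
    }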
new_thread_bottom = (char *) map_addr + guardsize;
new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
-# else
+# else /* !FLOATING_STACKS */
if (attr != NULL)
{
guardsize = page_roundup (attr->__guardsize, granularity);
{
size_t guardsize = th->p_guardsize;
/* Free the stack and thread descriptor area */
-#ifdef NEED_SEPARATE_REGISTER_STACK
char *guardaddr = th->p_guardaddr;
- /* We unmap exactly what we mapped, in case there was something
- else in the same region. Guardaddr is always set, eve if
- guardsize is 0. This allows us to compute everything else. */
+ /* Guardaddr is always set, even if guardsize is 0. This allows
+ us to compute everything else. */
size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
- /* Unmap the register stack, which is below guardaddr. */
- munmap((caddr_t)(guardaddr-stacksize),
- 2 * stacksize + th->p_guardsize);
+#ifdef NEED_SEPARATE_REGISTER_STACK
+ /* Take account of the register stack, which is below guardaddr. */
+ guardaddr -= stacksize;
+ stacksize *= 2;
+#endif
+#if FLOATING_STACKS
+ /* Can unmap safely. */
+ munmap(guardaddr, stacksize + guardsize);
#else
- char *guardaddr = th->p_guardaddr;
- /* We unmap exactly what we mapped, in case there was something
- else in the same region. Guardaddr is always set, eve if
- guardsize is 0. This allows us to compute everything else. */
- size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-
- munmap (guardaddr, stacksize + guardsize);
+ /* Only remap to PROT_NONE, so that the region stays reserved in
+ case we map the stack again later. This avoids collisions with
+ other mmap()s, in particular those done by malloc(). */
+ mmap(guardaddr, stacksize + guardsize, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
#endif
}
}
#endif
#endif
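The freeing code above is unified: the stack extent is derived from guardaddr and the descriptor address, widened when a separate register stack sits below the guard, and then either unmapped outright (floating stacks) or remapped to PROT_NONE so the fixed address range stays reserved. A small sketch of the PROT_NONE trick, assuming a Linux-like mmap(); the sizes are made up:

    /* Reserve a fixed region with PROT_NONE instead of unmapping it,
       so reusing the same address later cannot collide with other
       mmap()s.  Error handling kept minimal.  */
    #include <stdio.h>
    #include <sys/mman.h>

    int main (void)
    {
      size_t len = 4096 * 4;

      /* Obtain a region, as the stack allocation would.  */
      char *addr = mmap (NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED)
        return 1;

      /* "Free" it the fixed-stack way: keep the range reserved but
         inaccessible.  */
      if (mmap (addr, len, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0)
          == MAP_FAILED)
        return 1;

      /* Mapping the same range again later is now safe by construction. */
      if (mmap (addr, len, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0)
          == MAP_FAILED)
        return 1;

      puts ("stack region recycled at a fixed address");
      return munmap (addr, len) != 0;
    }

Keeping the pages mapped but inaccessible means a later thread stack at the same fixed address cannot race with an unrelated mmap(), in particular one issued by malloc(), landing in the hole.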
-#if USE_MEMCPY
-
/* The following macros are only invoked with (2n+1)-multiples of
INTERNAL_SIZE_T units, with a positive integer n. This is exploited
- for fast inline execution when n is small. */
+ for fast inline execution when n is small. If the regions to be
+ copied do overlap, the destination always lies _below_ the source. */
+
+#if USE_MEMCPY
#define MALLOC_ZERO(charp, nbytes) \
do { \
} else memset((charp), 0, mzsz); \
} while(0)
-#define MALLOC_COPY(dest,src,nbytes) \
+/* If the regions overlap, dest is always _below_ src. */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap) \
do { \
INTERNAL_SIZE_T mcsz = (nbytes); \
if(mcsz <= 9*sizeof(mcsz)) { \
*mcdst++ = *mcsrc++; \
*mcdst++ = *mcsrc++; \
*mcdst = *mcsrc ; \
- } else memcpy(dest, src, mcsz); \
+ } else if(overlap) \
+ memmove(dest, src, mcsz); \
+ else \
+ memcpy(dest, src, mcsz); \
} while(0)
-#define MALLOC_MEMMOVE(dest,src,nbytes) \
- memmove(dest, src, mcsz)
-
#else /* !USE_MEMCPY */
/* Use Duff's device for good zeroing/copying performance. */
} \
} while(0)
-#define MALLOC_COPY(dest,src,nbytes) \
+/* If the regions overlap, dest is always _below_ src. */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap) \
do { \
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
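The new overlap argument makes the copy's direction contract explicit: a caller passing 1 guarantees that dest lies below src, so the copy must go through memmove (or, in the Duff's-device variant, the forward word-at-a-time loop, which is already safe in that direction). A standalone illustration of the contract, using plain libc calls in place of the macro:

    /* When regions overlap with dest below src, a forward copy or
       memmove is safe; memcpy would be undefined behavior.  */
    #include <stdio.h>
    #include <string.h>

    int main (void)
    {
      char buf[32] = "xxxxhello, world";

      /* Overlapping move down: dest (buf) is below src (buf + 4),
         i.e. the overlap == 1 case.  */
      memmove (buf, buf + 4, strlen (buf + 4) + 1);
      puts (buf);                       /* prints "hello, world" */

      /* Disjoint copy: the overlap == 0 case, plain memcpy.  */
      char copy[32];
      memcpy (copy, buf, strlen (buf) + 1);
      puts (copy);
      return 0;
    }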
/* Must alloc, copy, free. */
newmem = mALLOc(bytes);
if (newmem == 0) return 0; /* propagate failure */
- MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
+ MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ, 0);
munmap_chunk(oldp);
return newmem;
}
unlink(prev, bck, fwd);
newp = prev;
newsize += prevsize + nextsize;
- MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+ MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize,
+ 1);
top(ar_ptr) = chunk_at_offset(newp, nb);
set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
set_head_size(newp, nb);
unlink(prev, bck, fwd);
newp = prev;
newsize += nextsize + prevsize;
- MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+ MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
goto split;
}
}
unlink(prev, bck, fwd);
newp = prev;
newsize += prevsize;
- MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+ MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
goto split;
}
}
}
/* Otherwise copy, free, and exit */
- MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+ MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 0);
chunk_free(ar_ptr, oldp);
return newp;
}
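The flag values at these call sites follow from the chunk layout: when realloc coalesces with the previous chunk, newp is that lower chunk, so chunk2mem(newp) sits below oldmem and the oldsize-byte copy can overlap its source (overlap == 1); the alloc-copy-free and mmap paths, including the checked variant below, copy into a freshly obtained block that cannot overlap the old one (overlap == 0). A toy model of the overlapping case, with hypothetical sizes:

    /* Backward coalescing in miniature: the merged chunk's user area
       starts prevsize bytes below the old one, so the copy overlaps
       whenever oldsize > prevsize.  */
    #include <assert.h>
    #include <string.h>

    int main (void)
    {
      char arena[64];
      size_t prevsize = 16, oldsize = 32;
      char *oldmem = arena + prevsize;  /* old user area */
      char *newmem = arena;             /* merged chunk's user area */

      memset (oldmem, 'A', oldsize);
      /* newmem < oldmem and the regions overlap: memmove territory,
         overlap == 1.  */
      memmove (newmem, oldmem, oldsize);
      assert (newmem[0] == 'A' && newmem[oldsize - 1] == 'A');
      return 0;
    }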
newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
if (newp) {
MALLOC_COPY(BOUNDED_N(chunk2mem(newp), nb),
- oldmem, oldsize - 2*SIZE_SZ);
+ oldmem, oldsize - 2*SIZE_SZ, 0);
munmap_chunk(oldp);
}
}