#include <sys/types.h>
#include <_itoa.h>
#include "dynamic-link.h"
+#include <libc-internal.h>
/* Statistics function. */
#ifdef SHARED
map->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
- size_t offset = (((GL(dl_tls_static_used)
- - map->l_tls_firstbyte_offset
- + map->l_tls_align - 1) & -map->l_tls_align)
+ size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
+ - map->l_tls_firstbyte_offset,
+ map->l_tls_align)
+ map->l_tls_firstbyte_offset);
size_t used = offset + map->l_tls_blocksize;
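
For reference, a standalone sketch of the rounding helpers this change leans on. The macro bodies below are local stand-ins assumed to match the ALIGN_UP/ALIGN_DOWN definitions pulled in from <libc-internal.h> (only their names and the include are confirmed by the patch itself); the loop checks that, for a power-of-two alignment, they agree with the open-coded bit arithmetic being removed here:

/* Standalone sketch.  The macro bodies are assumed to match the
   ALIGN_UP/ALIGN_DOWN definitions in <libc-internal.h>; both require a
   power-of-two alignment.  */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define ALIGN_DOWN(base, size) ((base) & -((__typeof__ (base)) (size)))
#define ALIGN_UP(base, size)   ALIGN_DOWN ((base) + (size) - 1, (size))

int
main (void)
{
  size_t align = 64;          /* stand-in for map->l_tls_align */
  for (size_t x = 0; x < 4 * align; ++x)
    {
      /* Old form removed above: (x + align - 1) & -align.  */
      assert (ALIGN_UP (x, align) == ((x + align - 1) & -align));
      /* Old form: x & ~(align - 1).  */
      assert (ALIGN_DOWN (x, align) == (x & ~(align - 1)));
    }
  puts ("ALIGN_UP/ALIGN_DOWN agree with the old bit arithmetic");
  return 0;
}
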
struct textrels *newp;
newp = (struct textrels *) alloca (sizeof (*newp));
- newp->len = (((ph->p_vaddr + ph->p_memsz + GLRO(dl_pagesize) - 1)
- & ~(GLRO(dl_pagesize) - 1))
- - (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
- newp->start = ((ph->p_vaddr & ~(GLRO(dl_pagesize) - 1))
- + (caddr_t) l->l_addr);
+ newp->len = ALIGN_UP (ph->p_vaddr + ph->p_memsz, GLRO(dl_pagesize))
+ - ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
+ newp->start = PTR_ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize))
+ + (caddr_t) l->l_addr;
if (__mprotect (newp->start, newp->len, PROT_READ|PROT_WRITE) < 0)
{
void internal_function
_dl_protect_relro (struct link_map *l)
{
- ElfW(Addr) start = ((l->l_addr + l->l_relro_addr)
- & ~(GLRO(dl_pagesize) - 1));
- ElfW(Addr) end = ((l->l_addr + l->l_relro_addr + l->l_relro_size)
- & ~(GLRO(dl_pagesize) - 1));
-
+ ElfW(Addr) start = ALIGN_DOWN((l->l_addr
+ + l->l_relro_addr),
+ GLRO(dl_pagesize));
+ ElfW(Addr) end = ALIGN_DOWN((l->l_addr
+ + l->l_relro_addr
+ + l->l_relro_size),
+ GLRO(dl_pagesize));
if (start != end
&& __mprotect ((void *) start, end - start, PROT_READ) < 0)
{
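
Both hunks above (the textrels bookkeeping and _dl_protect_relro) compute a page-aligned window for mprotect: round the start down to a page boundary, round the end up, and protect every page the segment touches. A small sketch with hypothetical segment values (the vaddr/memsz numbers are made up, and the ALIGN_* macros are local stand-ins for the <libc-internal.h> ones):

/* Sketch with hypothetical values; 'pagesize' stands in for GLRO(dl_pagesize).  */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(base, size) ((base) & -((__typeof__ (base)) (size)))
#define ALIGN_UP(base, size)   ALIGN_DOWN ((base) + (size) - 1, (size))

int
main (void)
{
  uintptr_t pagesize = 4096;
  uintptr_t vaddr = 0x1234;     /* hypothetical ph->p_vaddr */
  uintptr_t memsz = 0x2000;     /* hypothetical ph->p_memsz */

  uintptr_t start = ALIGN_DOWN (vaddr, pagesize);              /* 0x1000 */
  uintptr_t len = ALIGN_UP (vaddr + memsz, pagesize) - start;  /* 0x3000 */

  printf ("mprotect range: [%#lx, %#lx)\n",
          (unsigned long) start, (unsigned long) (start + len));
  return 0;
}
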
/* For MIN, MAX, powerof2. */
#include <sys/param.h>
+/* For ALIGN_UP. */
+#include <libc-internal.h>
+
/*
Debugging:
return;
/* pagesize is a power of 2 */
- assert ((GLRO (dl_pagesize) & (GLRO (dl_pagesize) - 1)) == 0);
+ assert (powerof2(GLRO (dl_pagesize)));
/* A contiguous main_arena is consistent with sbrk_base. */
if (av == &main_arena && contiguous (av))
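
The assert now spells the power-of-two test with powerof2 from <sys/param.h> instead of open-coding it. A tiny sketch of the check it performs; the fallback definition here is an assumption (the classic (((x) - 1) & (x)) == 0 test) and on glibc the macro comes from <sys/param.h>:

/* Sketch of the powerof2() test the assert now relies on.  */
#include <assert.h>
#include <sys/param.h>

#ifndef powerof2
# define powerof2(x) ((((x) - 1) & (x)) == 0)   /* assumed fallback */
#endif

int
main (void)
{
  assert (powerof2 (4096));      /* typical page size */
  assert (powerof2 (1));
  assert (!powerof2 (4095));
  assert (!powerof2 (12288));    /* three pages, not a power of two */
  return 0;
}
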
unsigned long remainder_size; /* its size */
- size_t pagemask = GLRO (dl_pagesize) - 1;
+ size_t pagesize = GLRO (dl_pagesize);
bool tried_mmap = false;
need for further alignments unless we have high alignment.

*/
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
- size = (nb + SIZE_SZ + pagemask) & ~pagemask;
+ size = ALIGN_UP (nb + SIZE_SZ, pagesize);
else
- size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
+ size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
tried_mmap = true;
/* Don't try if size wraps around 0 */
assert ((old_top == initial_top (av) && old_size == 0) ||
((unsigned long) (old_size) >= MINSIZE &&
prev_inuse (old_top) &&
- ((unsigned long) old_end & pagemask) == 0));
+ ((unsigned long) old_end & (pagesize - 1)) == 0));
/* Precondition: not enough current space to satisfy nb request */
assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
previous calls. Otherwise, we correct to page-align below.
*/
- size = (size + pagemask) & ~pagemask;
+ size = ALIGN_UP (size, pagesize);
/*
Don't try to call MORECORE if argument is so big as to appear
/* Cannot merge with old top, so add its size back in */
if (contiguous (av))
- size = (size + old_size + pagemask) & ~pagemask;
+ size = ALIGN_UP (size + old_size, pagesize);
/* If we are relying on mmap as backup, then use larger units */
if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
/* Extend the end address to hit a page boundary */
end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
- correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
+ correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
assert (correction >= 0);
snd_brk = (char *) (MORECORE (correction));
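
The correction step above adds exactly the number of bytes needed to push the new break to the next page boundary (zero if it already lands on one). A quick sketch with a made-up break value; bytes_to_next_page is a hypothetical helper that just writes out ALIGN_UP (end_misalign, pagesize) - end_misalign:

/* Sketch with a made-up break value.  */
#include <stddef.h>
#include <stdio.h>

static size_t
bytes_to_next_page (size_t end_misalign, size_t pagesize)
{
  /* Written-out form of ALIGN_UP (end_misalign, pagesize).  */
  size_t aligned_end = (end_misalign + pagesize - 1) & -pagesize;
  return aligned_end - end_misalign;
}

int
main (void)
{
  printf ("%zu\n", bytes_to_next_page (0x603512, 4096));  /* 2798 */
  printf ("%zu\n", bytes_to_next_page (0x604000, 4096));  /* 0 */
  return 0;
}
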
long released; /* Amount actually released */
char *current_brk; /* address returned by pre-check sbrk call */
char *new_brk; /* address returned by post-check sbrk call */
- size_t pagesz;
+ size_t pagesize;
long top_area;
- pagesz = GLRO (dl_pagesize);
+ pagesize = GLRO (dl_pagesize);
top_size = chunksize (av->top);
top_area = top_size - MINSIZE - 1;
return 0;
/* Release in pagesize units, keeping at least one page */
- extra = (top_area - pad) & ~(pagesz - 1);
+ extra = (top_area - pad) & ~(pagesize - 1);
if (extra == 0)
return 0;
internal_function
mremap_chunk (mchunkptr p, size_t new_size)
{
- size_t page_mask = GLRO (dl_pagesize) - 1;
+ size_t pagesize = GLRO (dl_pagesize);
INTERNAL_SIZE_T offset = p->prev_size;
INTERNAL_SIZE_T size = chunksize (p);
char *cp;
assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
/* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
- new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
+ new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
/* No need to remap if the number of pages does not change. */
if (size + offset == new_size)
ptmalloc_init ();
void *address = RETURN_ADDRESS (0);
- size_t pagesz = GLRO (dl_pagesize);
- return _mid_memalign (pagesz, bytes, address);
+ size_t pagesize = GLRO (dl_pagesize);
+ return _mid_memalign (pagesize, bytes, address);
}
void *
ptmalloc_init ();
void *address = RETURN_ADDRESS (0);
- size_t pagesz = GLRO (dl_pagesize);
- size_t page_mask = GLRO (dl_pagesize) - 1;
- size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
+ size_t pagesize = GLRO (dl_pagesize);
+ size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
/* Check for overflow. */
- if (bytes > SIZE_MAX - 2 * pagesz - MINSIZE)
+ if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
{
__set_errno (ENOMEM);
return 0;
}
- return _mid_memalign (pagesz, rounded_bytes, address);
+ return _mid_memalign (pagesize, rounded_bytes, address);
}
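
The last two hunks touch the page-aligned allocators: __libc_valloc passes the raw byte count to _mid_memalign with page alignment, while __libc_pvalloc first rounds the byte count up to a whole number of pages with ALIGN_UP. A short usage sketch of the difference (both interfaces are obsolescent; posix_memalign is the usual replacement):

/* Usage sketch for the two page-aligned allocators touched above.  */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  long pagesize = sysconf (_SC_PAGESIZE);

  void *v = valloc (100);    /* page-aligned, at least 100 usable bytes */
  void *p = pvalloc (100);   /* page-aligned, size rounded up to a full page */

  printf ("pagesize=%ld valloc usable=%zu pvalloc usable=%zu\n",
          pagesize, malloc_usable_size (v), malloc_usable_size (p));

  free (v);
  free (p);
  return 0;
}
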
void *