#include <stddef.h>
#include <inttypes.h>
+/*
+ * Note: add new members to this structure only at the end.
+ * The position of elements in this structure is an ABI.
+ */
struct com32_pmapi {
+ void *(*lmalloc)(size_t); /* Allocate from the low-memory heap */
+ void (*lfree)(void *); /* Free a low-memory allocation */
+
size_t (*read_file)(uint16_t *, void *, size_t);
};
\
opendir.o readdir.o closedir.o getcwd.o chdir.o fdopendir.o \
\
- libgcc/__ashldi3.o libgcc/__udivdi3.o \
+ libgcc/__ashldi3.o libgcc/__udivdi3.o \
libgcc/__negdi2.o libgcc/__ashrdi3.o libgcc/__lshrdi3.o \
libgcc/__muldi3.o libgcc/__udivmoddi4.o libgcc/__umoddi3.o \
libgcc/__divdi3.o libgcc/__moddi3.o \
syslinux/video/fontquery.o syslinux/video/forcetext.o \
syslinux/video/reportmode.o
+# These are the objects which are also imported into the core
+LIBCOREOBJS = \
+ memcpy.o mempcpy.o memset.o memcmp.o memmove.o \
+ strlen.o stpcpy.o strcpy.o strcmp.o strlcpy.o strlcat.o \
+ strchr.o strncmp.o strncpy.o \
+ \
+ snprintf.o sprintf.o vsnprintf.o \
+ \
+ dprintf.o vdprintf.o \
+ \
+ zalloc.o strdup.o \
+ \
+ sys/intcall.o sys/farcall.o sys/cfarcall.o sys/zeroregs.o \
+ \
+ libgcc/__ashldi3.o libgcc/__udivdi3.o \
+ libgcc/__negdi2.o libgcc/__ashrdi3.o libgcc/__lshrdi3.o \
+ libgcc/__muldi3.o libgcc/__udivmoddi4.o libgcc/__umoddi3.o \
+ libgcc/__divdi3.o libgcc/__moddi3.o
+
BINDIR = /usr/bin
LIBDIR = /usr/lib
DATADIR = /usr/share
INCDIR = /usr/include
COM32DIR = $(AUXDIR)/com32
-all: libcom32.a
+all: libcom32.a libcomcore.a
libcom32.a : $(LIBOBJS)
rm -f $@
$(AR) cq $@ $^
$(RANLIB) $@
+libcomcore.a : $(LIBCOREOBJS)
+ rm -f $@
+ $(AR) cq $@ $^
+ $(RANLIB) $@
+
tidy dist clean:
rm -f sys/vesa/alphatbl.c
find . \( -name \*.o -o -name \*.a -o -name .\*.d -o -name \*.tmp \) -print0 | \
## -----------------------------------------------------------------------
##
## Copyright 1998-2009 H. Peter Anvin - All Rights Reserved
-## Copyright 2009 Intel Corporation; author: H. Peter Anvin
+## Copyright 2009-2010 Intel Corporation; author: H. Peter Anvin
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
SOBJ := $(patsubst %.S,%.o,$(SSRC))
LIB = libcore.a
-LIBS = $(LIB) $(com32)/lib/libcom32.a $(LIBGCC)
+LIBS = $(LIB) $(com32)/lib/libcomcore.a $(LIBGCC)
LIBOBJS = $(COBJ) $(SOBJ)
NASMDEBUG = -g -F dwarf
-l $(@:.o=.lsr) -o $@ -MP -MD .$@.d $<
%.elf: %.o $(LIBS) syslinux.ld
- $(LD) $(LDFLAGS) -T syslinux.ld -M -o $@ $< $(LIBS) \
+ $(LD) $(LDFLAGS) -T syslinux.ld -M -o $@ $< \
+ --start-group $(LIBS) --end-group \
> $(@:.elf=.map)
$(OBJDUMP) -h $@ > $(@:.elf=.sec)
$(PERL) lstadjust.pl $(@:.elf=.lsr) $(@:.elf=.sec) $(@:.elf=.lst)
call com32_entry ; Run the program...
; ... on return, fall through to com32_exit ...
com32_exit:
- mov bx,.rm
+ mov bx,comboot_return
jmp enter_rm
bits 16
section .text16
-.rm:
- mov dword [PMESP],__stack_end ; Stop use of COM32 stack
- sti
- jmp enter_command
-
not_com32r:
mov si,KernelCName
call writestr
;
; Danger, Will Robinson: it's not clear the use of
; core_xfer_buf is safe here.
- global __com32
+ global __entry_esp, __com32
alignz 4
+__entry_esp:
+ dd 0 ; Dummy to avoid _exit issues
__com32:
dd 8 ; Argument count
dd 0 ; No command line
dd core_cfarcall ; Cfarcall entry point
HighMemSize dd 0 ; End of memory pointer (bytes)
dd pm_api_vector ; Protected mode functions
+
jmp comboot_seg:100h ; Run it
-; Proper return vector
-; Note: this gets invoked both via INT 21h and directly via INT 20h.
-; We don't need to cld explicitly here, because comboot_exit does that
-; when invoking RESET_STACK_AND_SEGS.
-comboot_return: cli ; May not have a safe stack
- push enter_command ; Normal return to command prompt
- jmp comboot_exit
-
;
; Set up the COMBOOT API interrupt vectors. This is now done at
; initialization time.
ret
;
-; Restore the original state of the COMBOOT API vectors
+; Restore the original state of the COMBOOT API vectors, and free
+; any low memory allocated by the comboot module.
;
comboot_cleanup_api:
pusha
call crlf
jmp enter_command
+; Proper return vector
+; Note: this gets invoked both via INT 21h and directly via INT 20h.
+; We don't need to cld explicitly here, because comboot_exit does that
+; when invoking RESET_STACK_AND_SEGS.
+comboot_return:
+ cli ; May not have a safe stack
+ push enter_command ; Normal return to command prompt
+ ; jmp comboot_exit
+
;
; Generic COMBOOT return to command line code
; stack -> where to go next
; CX -> message (for _msg version)
;
+ extern comboot_cleanup_lowmem
comboot_exit:
xor cx,cx
comboot_exit_msg:
pop bx ; Return address
- RESET_STACK_AND_SEGS SI ; Contains sti, cld
+ RESET_STACK_AND_SEGS si ; Contains sti, cld
+ pm_call comboot_cleanup_lowmem
call adjust_screen ; The COMBOOT program might have changed the screen
jcxz .nomsg
mov si,KernelCName
} else {
static bool already = false;
if (!already) {
- fputs("URL syntax, but gPXE extensions not detected, "
- "tryng plain TFTP...\n", stdout);
+ printf("URL syntax, but gPXE extensions not detected, "
+ "tryng plain TFTP...\n");
already = true;
}
}
/* hello.c */
extern void myputs(const char*);
-/* malloc.c */
+/* mem/malloc.c, mem/free.c, mem/init.c */
extern void *malloc(size_t);
+extern void *lmalloc(size_t);
+extern void *pmapi_lmalloc(size_t);
extern void *zalloc(size_t);
extern void free(void *);
extern void mem_init(void);
; segment for COMBOOT images, which can use all 64K
;
int 12h
- mov dx,real_mode_seg + 0x1000
- shr dx,6
+ mov edx,__lowmem_heap + min_lowmem_heap + 1023
+ shr edx,10
cmp ax,dx
jae enough_ram
mov ax,dx
section .real_mode write nobits align=65536
global core_real_mode
core_real_mode resb 65536
-
comboot_seg equ real_mode_seg ; COMBOOT image loading zone
+;
+; At the very end, the lowmem heap
+;
+ extern __lowmem_heap
+min_lowmem_heap equ 65536
+
section .text16
+++ /dev/null
-/*
- * A simple temp malloc for Sysliux project from fstk. For now, just used
- * in fsc branch, which it's would be easy to remove it when we have a
- * powerful one, as hpa said this would happen when elflink branch do the
- * work.
- *
- * Copyright (C) 2009 Liu Aleaxander -- All rights reserved. This file
- * may be redistributed under the terms of the GNU Public License.
- */
-
-
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-
-/* The memory managemant structure */
-struct mem_struct {
- struct mem_struct *prev;
- int size;
- int free;
-};
-
-
-/* First, assume we just need 64K memory */
-static char memory[0x10000];
-
-/* Next free memory address */
-static struct mem_struct *next_start = (struct mem_struct *)memory;
-static uint32_t mem_end = (uint32_t)(memory + 0x10000);
-
-
-static inline struct mem_struct *get_next(struct mem_struct *mm)
-{
- uint32_t next = (uint32_t)mm + mm->size;
-
- if (next >= mem_end)
- return NULL;
- else
- return (struct mem_struct *)next;
-}
-
-/*
- * Here are the _merge_ functions, that merges a adjacent memory region,
- * from front, or from back, or even merges both. It returns the headest
- * region mem_struct.
- *
- */
-
-static struct mem_struct * merge_front(struct mem_struct *mm,
- struct mem_struct *prev)
-{
- struct mem_struct *next = get_next(mm);
-
- prev->size += mm->size;
- if (next)
- next->prev = prev;
- return prev;
-}
-
-static struct mem_struct * merge_back(struct mem_struct *mm,
- struct mem_struct *next)
-{
- mm->free = 1; /* Mark it free first */
- mm->size += next->size;
-
- next = get_next(next);
- if (next)
- next->prev = mm;
- return mm;
-}
-
-static struct mem_struct * merge_both(struct mem_struct *mm,
- struct mem_struct *prev,
- struct mem_struct *next)
-{
- prev->size += mm->size + next->size;
-
- next = get_next(next);
- if (next)
- next->prev = prev;
- return prev;
-}
-
-static inline struct mem_struct * try_merge_front(struct mem_struct *mm)
-{
- mm->free = 1;
- if (mm->prev->free)
- mm = merge_front(mm, mm->prev);
- return mm;
-}
-
-static inline struct mem_struct * try_merge_back(struct mem_struct *mm)
-{
- struct mem_struct *next = get_next(mm);
-
- mm->free = 1;
- if (next->free)
- merge_back(mm, next);
- return mm;
-}
-
-/*
- * Here's the main function, malloc, which allocates a memory rigon
- * of size _size_. Returns NULL if failed, or the address newly allocated.
- *
- */
-void *malloc(size_t size)
-{
- struct mem_struct *next = next_start;
- struct mem_struct *good = next, *prev;
- int size_needed = (size + sizeof(struct mem_struct) + 3) & ~3;
-
- while(next) {
- if (next->free && next->size >= size_needed) {
- good = next;
- break;
- }
- next = get_next(next);
- }
- if (good->size < size_needed) {
- printf("Out of memory, maybe we need append it\n");
- return NULL;
- } else if (good->size == size_needed) {
- /*
- * We just found a right memory that with the exact
- * size we want. So we just Mark it _not_free_ here,
- * and move on the _next_start_ pointer, even though
- * the next may not be a right next start.
- */
- good->free = 0;
- next_start = get_next(good);
- goto out;
- } else
- size = good->size; /* save the total size */
-
- /*
- * Note: allocate a new memory region will not change
- * it's prev memory, so we don't need change it here.
- */
- good->free = 0; /* Mark it not free any more */
- good->size = size_needed;
-
- next = get_next(good);
- if (next) {
- next->size = size - size_needed;
- /* check if it can contain 1 byte allocation at least */
- if (next->size <= (int)sizeof(struct mem_struct)) {
- good->size = size; /* restore the original size */
- next_start = get_next(good);
- goto out;
- }
-
- next->prev = good;
- next->free = 1;
- next_start = next; /* Update next_start */
-
- prev = next;
- next = get_next(next);
- if (next)
- next->prev = prev;
- } else
- next_start = (struct mem_struct *)memory;
-out:
- return (void *)((uint32_t)good + sizeof(struct mem_struct));
-}
-
-void *zalloc(size_t size)
-{
- void *p = malloc(size);
- if (p)
- memset(p, 0, size);
- return p;
-}
-
-void free(void *ptr)
-{
- struct mem_struct *mm = ptr - sizeof(*mm);
- struct mem_struct *prev = mm->prev;
- struct mem_struct *next = get_next(mm);
-
- if (!prev)
- mm = try_merge_back(mm);
- else if (!next)
- mm = try_merge_front(mm);
- else if (prev->free && !next->free)
- merge_front(mm, prev);
- else if (!prev->free && next->free)
- merge_back(mm, next);
- else if (prev->free && next->free)
- merge_both(mm, prev, next);
- else
- mm->free = 1;
-
- if (mm < next_start)
- next_start = mm;
-}
-
-/*
- * The debug function
- */
-void check_mem(void)
-{
- struct mem_struct *next = (struct mem_struct *)memory;
-
- printf("____________\n");
- while (next) {
- printf("%-6d %s\n", next->size, next->free ? "Free" : "Notf");
- next = get_next(next);
- }
- printf("\n");
-}
-
-
-void mem_init(void)
-{
-
- struct mem_struct *first = (struct mem_struct *)memory;
-
- first->prev = NULL;
- first->size = 0x10000;
- first->free = 1;
-
- next_start = first;
-}
--- /dev/null
+/*
+ * free.c
+ *
+ * Very simple linked-list based malloc()/free().
+ */
+
+#include <stdlib.h>
+#include <dprintf.h>
+#include "malloc.h"
+
+/*
+ * Mark the block at ah free and coalesce it with its neighbours when
+ * they are free and physically adjacent.  ah must already be linked
+ * into the all-blocks chain.  Returns the block that finally contains
+ * the freed storage (the predecessor if a front-merge happened).
+ */
+static struct free_arena_header *
+__free_block(struct free_arena_header *ah)
+{
+ struct free_arena_header *pah, *nah;
+ struct free_arena_header *head =
+ &__malloc_head[ARENA_HEAP_GET(ah->a.attrs)];
+
+ dprintf("free(%p)\n", (struct arena_header *)ah + 1);
+
+ pah = ah->a.prev;
+ nah = ah->a.next;
+ if ( ARENA_TYPE_GET(pah->a.attrs) == ARENA_TYPE_FREE &&
+ (char *)pah+ARENA_SIZE_GET(pah->a.attrs) == (char *)ah ) {
+ /* Coalesce into the previous block; pah is already on the free
+ chain, so ah never needs to be put on it. */
+ ARENA_SIZE_SET(pah->a.attrs, ARENA_SIZE_GET(pah->a.attrs) +
+ ARENA_SIZE_GET(ah->a.attrs));
+ pah->a.next = nah;
+ nah->a.prev = pah;
+
+#ifdef DEBUG_MALLOC
+ ARENA_TYPE_SET(ah->a.attrs, ARENA_TYPE_DEAD);
+#endif
+
+ ah = pah;
+ pah = ah->a.prev;
+ } else {
+ /* Need to add this block to the free chain */
+ ARENA_TYPE_SET(ah->a.attrs, ARENA_TYPE_FREE);
+ ah->a.tag = MALLOC_FREE;
+
+ ah->next_free = head->next_free;
+ ah->prev_free = head;
+ head->next_free = ah;
+ ah->next_free->prev_free = ah;
+ }
+
+ /* In either of the previous cases, we might be able to merge
+ with the subsequent block... */
+ if ( ARENA_TYPE_GET(nah->a.attrs) == ARENA_TYPE_FREE &&
+ (char *)ah+ARENA_SIZE_GET(ah->a.attrs) == (char *)nah ) {
+ ARENA_SIZE_SET(ah->a.attrs, ARENA_SIZE_GET(ah->a.attrs) +
+ ARENA_SIZE_GET(nah->a.attrs));
+
+ /* Remove the old block from the chains */
+ nah->next_free->prev_free = nah->prev_free;
+ nah->prev_free->next_free = nah->next_free;
+ ah->a.next = nah->a.next;
+ nah->a.next->a.prev = ah;
+
+#ifdef DEBUG_MALLOC
+ ARENA_TYPE_SET(nah->a.attrs, ARENA_TYPE_DEAD);
+#endif
+ }
+
+ /* Return the block that contains the freed block */
+ return ah;
+}
+
+/*
+ * C-standard free().  free(NULL) is a no-op.  ptr must have been
+ * returned by one of the allocators in this family; the owning heap
+ * is recovered from the block's attrs inside __free_block(), so this
+ * single entry point serves both the main and the lowmem heap.
+ */
+void free(void *ptr)
+{
+ struct free_arena_header *ah;
+
+ if ( !ptr )
+ return;
+
+ /* Step back over the arena_header that precedes the user area */
+ ah = (struct free_arena_header *)
+ ((struct arena_header *)ptr - 1);
+
+#ifdef DEBUG_MALLOC
+ assert( ARENA_TYPE_GET(ah->a.attrs) == ARENA_TYPE_USED );
+#endif
+
+ __free_block(ah);
+
+ /* Here we could insert code to return memory to the system. */
+}
+
+/*
+ * This is used to insert a block which is not previously on the
+ * free list. Only the a.size field of the arena header is assumed
+ * to be valid.
+ */
+void __inject_free_block(struct free_arena_header *ah)
+{
+ struct free_arena_header *head =
+ &__malloc_head[ARENA_HEAP_GET(ah->a.attrs)];
+ struct free_arena_header *nah;
+ size_t a_end = (size_t) ah + ARENA_SIZE_GET(ah->a.attrs);
+ size_t n_end;
+
+ dprintf("inject: %#zx bytes @ %p, heap %u (%p)\n",
+ ARENA_SIZE_GET(ah->a.attrs), ah,
+ ARENA_HEAP_GET(ah->a.attrs), head);
+
+ /* Walk the address-ordered all-blocks chain to find the insertion
+ point, and make sure the new block overlaps nothing. */
+ for (nah = head->a.next ; nah != head ; nah = nah->a.next) {
+ n_end = (size_t) nah + ARENA_SIZE_GET(nah->a.attrs);
+
+ /* Is nah entirely beyond this block? */
+ if ((size_t) nah >= a_end)
+ break;
+
+ /* Is this block entirely beyond nah? */
+ if ((size_t) ah >= n_end)
+ continue;
+
+ /* Otherwise we have some sort of overlap - reject this block */
+ /* NOTE(review): the rejection is silent; the caller gets no
+ error indication.  Confirm this is intended. */
+ return;
+ }
+
+ /* Now, nah should point to the successor block */
+ ah->a.next = nah;
+ ah->a.prev = nah->a.prev;
+ nah->a.prev = ah;
+ ah->a.prev->a.next = ah;
+
+ __free_block(ah);
+}
+
+/*
+ * Free all memory which is tagged with a specific tag.
+ */
+static void __free_tagged(malloc_tag_t tag) {
+ struct free_arena_header *fp, *head;
+ int i;
+
+ for (i = 0; i < NHEAP; i++) {
+ dprintf("__free_tagged(%u) heap %d\n", tag, i);
+ head = &__malloc_head[i];
+ /* Start at the first real block: starting at the head node
+ itself would make the loop condition false immediately and
+ free nothing. */
+ for (fp = head->a.next ; fp != head ; fp = fp->a.next) {
+ if (ARENA_TYPE_GET(fp->a.attrs) == ARENA_TYPE_USED &&
+ fp->a.tag == tag)
+ fp = __free_block(fp); /* continue from the merged block */
+ }
+ }
+
+ dprintf("__free_tagged(%u) done\n", tag);
+}
+
+/*
+ * Invoked from 16-bit code via pm_call when a COMBOOT/COM32 program
+ * exits: frees every allocation tagged MALLOC_MODULE, i.e. memory
+ * handed out through the pmapi lmalloc entry point.
+ */
+void comboot_cleanup_lowmem(com32sys_t *regs)
+{
+ (void)regs; /* required by the pm_call signature, unused here */
+
+ __free_tagged(MALLOC_MODULE);
+}
--- /dev/null
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include "malloc.h"
+
+struct free_arena_header __malloc_head[NHEAP];
+
+static char main_heap[65536] __aligned(16);
+extern char __lowmem_heap[];
+
+/*
+ * Initialize the malloc() heaps: the static main heap and the lowmem
+ * heap placed at __lowmem_heap in base memory.
+ */
+void mem_init(void)
+{
+ struct free_arena_header *fp;
+ int i;
+ uint16_t *bios_free_mem = (uint16_t *)0x413; /* BDA: base memory in KiB */
+
+ /* Initialize the head nodes, one per heap */
+
+ fp = &__malloc_head[0];
+ for (i = 0 ; i < NHEAP ; i++) {
+ fp->a.next = fp->a.prev = fp->next_free = fp->prev_free = fp;
+ fp->a.attrs = ARENA_TYPE_HEAD | (i << ARENA_HEAP_POS);
+ fp++; /* advance: without this only __malloc_head[0] is set up */
+ }
+
+ /* Initialize the main heap */
+ fp = (struct free_arena_header *)main_heap;
+ fp->a.attrs = ARENA_TYPE_USED | (HEAP_MAIN << ARENA_HEAP_POS);
+ ARENA_SIZE_SET(fp->a.attrs, sizeof main_heap);
+ __inject_free_block(fp);
+
+ /* Initialize the lowmem heap: everything from __lowmem_heap up to
+ the end of BIOS-reported base memory. */
+ fp = (struct free_arena_header *)__lowmem_heap;
+ fp->a.attrs = ARENA_TYPE_USED | (HEAP_LOWMEM << ARENA_HEAP_POS);
+ ARENA_SIZE_SET(fp->a.attrs, (*bios_free_mem << 10) - (uintptr_t)fp);
+ __inject_free_block(fp);
+}
--- /dev/null
+/*
+ * malloc.c
+ *
+ * Very simple linked-list based malloc()/free().
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <dprintf.h>
+#include "malloc.h"
+
+/*
+ * Allocate from the free block fp.  size already includes the arena
+ * header and is a multiple of the arena alignment (see _malloc()).
+ * If enough space remains for a worthwhile free remainder the block
+ * is split; otherwise the whole block is handed out.  Returns a
+ * pointer to the user area (just past fp's arena_header).
+ */
+static void *__malloc_from_block(struct free_arena_header *fp,
+ size_t size, malloc_tag_t tag)
+{
+ size_t fsize;
+ struct free_arena_header *nfp, *na;
+ unsigned int heap = ARENA_HEAP_GET(fp->a.attrs);
+
+ fsize = ARENA_SIZE_GET(fp->a.attrs);
+
+ /* We need the 2* to account for the larger requirements of a free block */
+ if ( fsize >= size+2*sizeof(struct arena_header) ) {
+ /* Bigger block than required -- split block */
+ nfp = (struct free_arena_header *)((char *)fp + size);
+ na = fp->a.next;
+
+ /* nfp->a.attrs starts out as whatever was in memory, but the
+ three SET macros below together define every bit field. */
+ ARENA_TYPE_SET(nfp->a.attrs, ARENA_TYPE_FREE);
+ ARENA_HEAP_SET(nfp->a.attrs, heap);
+ ARENA_SIZE_SET(nfp->a.attrs, fsize-size);
+ nfp->a.tag = MALLOC_FREE;
+ ARENA_TYPE_SET(fp->a.attrs, ARENA_TYPE_USED);
+ ARENA_SIZE_SET(fp->a.attrs, size);
+ fp->a.tag = tag;
+
+ /* Insert into all-block chain */
+ nfp->a.prev = fp;
+ nfp->a.next = na;
+ na->a.prev = nfp;
+ fp->a.next = nfp;
+
+ /* Replace current block on free chain */
+ nfp->next_free = fp->next_free;
+ nfp->prev_free = fp->prev_free;
+ fp->next_free->prev_free = nfp;
+ fp->prev_free->next_free = nfp;
+ } else {
+ /* Allocate the whole block */
+ ARENA_TYPE_SET(fp->a.attrs, ARENA_TYPE_USED);
+ fp->a.tag = tag;
+
+ /* Remove from free chain */
+ fp->next_free->prev_free = fp->prev_free;
+ fp->prev_free->next_free = fp->next_free;
+ }
+
+ return (void *)(&fp->a + 1);
+}
+
+/*
+ * Common allocator core: first-fit scan of the free chain of `heap`,
+ * tagging the resulting block with `tag`.  Returns NULL when size is
+ * zero or no free block is large enough.
+ */
+static void *_malloc(size_t size, enum heap heap, malloc_tag_t tag)
+{
+ struct free_arena_header *fp;
+ struct free_arena_header *head = &__malloc_head[heap];
+ void *p = NULL;
+
+ dprintf("_malloc(%zu, %u, %u) = ", size, heap, tag);
+
+ if (size) {
+ /* Add the obligatory arena header, and round up */
+ size = (size + 2 * sizeof(struct arena_header) - 1) & ARENA_SIZE_MASK;
+
+ for ( fp = head->next_free ; fp != head ; fp = fp->next_free ) {
+ if ( ARENA_SIZE_GET(fp->a.attrs) >= size ) {
+ /* Found fit -- allocate out of this block */
+ p = __malloc_from_block(fp, size, tag);
+ break;
+ }
+ }
+ }
+
+ dprintf("%p\n", p);
+ return p;
+}
+
+/* malloc(): allocate from the main heap, tagged as core memory. */
+void *malloc(size_t size)
+{
+ return _malloc(size, HEAP_MAIN, MALLOC_CORE);
+}
+
+/* lmalloc(): allocate from the low-memory heap for the core itself. */
+void *lmalloc(size_t size)
+{
+ return _malloc(size, HEAP_LOWMEM, MALLOC_CORE);
+}
+
+/* pmapi_lmalloc(): low-memory allocation on behalf of a COM32 module;
+ tagged MALLOC_MODULE so comboot_cleanup_lowmem() can reclaim it. */
+void *pmapi_lmalloc(size_t size)
+{
+ return _malloc(size, HEAP_LOWMEM, MALLOC_MODULE);
+}
--- /dev/null
+/*
+ * malloc.h
+ *
+ * Internals for the memory allocator
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include "core.h"
+
+/* NOTE(review): no include guard; safe only while each .c includes
+   this at most once -- consider adding one. */
+
+/*
+ * This is a temporary hack. In Syslinux 5 this will be a pointer to
+ * the owner module.
+ */
+typedef size_t malloc_tag_t;
+enum malloc_owner {
+ MALLOC_FREE,
+ MALLOC_CORE,
+ MALLOC_MODULE,
+};
+
+struct free_arena_header;
+
+/*
+ * This structure should be a power of two. This becomes the
+ * alignment unit.
+ */
+struct arena_header {
+ malloc_tag_t tag;
+ size_t attrs; /* Bits 0..1: Type
+ 2..3: Heap,
+ 4..31: MSB of the size */
+ struct free_arena_header *next, *prev;
+};
+
+enum arena_type {
+ ARENA_TYPE_USED = 0,
+ ARENA_TYPE_FREE = 1,
+ ARENA_TYPE_HEAD = 2,
+ ARENA_TYPE_DEAD = 3,
+};
+/* One head node per heap; NHEAP is the heap count. */
+enum heap {
+ HEAP_MAIN,
+ HEAP_LOWMEM,
+ NHEAP
+};
+
+#define ARENA_SIZE_MASK (~(uintptr_t)(sizeof(struct arena_header)-1))
+#define ARENA_HEAP_MASK ((size_t)0xc)
+#define ARENA_HEAP_POS 2
+#define ARENA_TYPE_MASK ((size_t)0x3)
+
+#define ARENA_ALIGN_UP(p) ((char *)(((uintptr_t)(p) + ~ARENA_SIZE_MASK) \
+ & ARENA_SIZE_MASK))
+#define ARENA_ALIGN_DOWN(p) ((char *)((uintptr_t)(p) & ARENA_SIZE_MASK))
+
+#define ARENA_SIZE_GET(attrs) ((attrs) & ARENA_SIZE_MASK)
+#define ARENA_HEAP_GET(attrs) (((attrs) & ARENA_HEAP_MASK) >> ARENA_HEAP_POS)
+#define ARENA_TYPE_GET(attrs) ((attrs) & ARENA_TYPE_MASK)
+
+#define ARENA_SIZE_SET(attrs, size) \
+ ((attrs) = ((size) & ARENA_SIZE_MASK) | ((attrs) & ~ARENA_SIZE_MASK))
+#define ARENA_HEAP_SET(attrs, heap) \
+ ((attrs) = (((heap) << ARENA_HEAP_POS) & ARENA_HEAP_MASK) | \
+ ((attrs) & ~ARENA_HEAP_MASK))
+#define ARENA_TYPE_SET(attrs, type) \
+ ((attrs) = ((attrs) & ~ARENA_TYPE_MASK) | \
+ ((type) & ARENA_TYPE_MASK))
+
+/*
+ * This structure should be no more than twice the size of the
+ * previous structure.
+ */
+struct free_arena_header {
+ struct arena_header a;
+ struct free_arena_header *next_free, *prev_free;
+ size_t _pad[2]; /* Pad to 2*sizeof(struct arena_header) */
+};
+
+extern struct free_arena_header __malloc_head[NHEAP];
+void __inject_free_block(struct free_arena_header *ah);
const struct com32_pmapi pm_api_vector =
{
+ /* free() recovers the owning heap from the block's attrs, so it
+ works unchanged as the lfree handler for lowmem allocations. */
+ .lmalloc = pmapi_lmalloc, /* Allocate low memory */
+ .lfree = free, /* Free low memory */
+
.read_file = pmapi_read_file,
};
; -----------------------------------------------------------------------
;
; Copyright 2005-2008 H. Peter Anvin - All Rights Reserved
-; Copyright 2009 Intel Corporation; author: H. Peter Anvin
+; Copyright 2009-2010 Intel Corporation; author: H. Peter Anvin
;
; This program is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License as published by
mov ds,%1
mov es,%1
lss esp,[BaseStack]
+ mov dword [PMESP],__stack_end ; Reset PM stack
sti
cld
%endmacro
* Special 16-bit segments
*/
. = ALIGN(65536);
+ .real_mode (NOLOAD) : {
+ *(.real_mode)
+ }
+ real_mode_seg = core_real_mode >> 4;
+
+ . = ALIGN(65536);
.xfer_buf (NOLOAD) : {
*(.xfer_buf)
}
xfer_buf_seg = core_xfer_buf >> 4;
- . = ALIGN(65536);
- .real_mode (NOLOAD) : {
- *(.real_mode)
- }
- real_mode_seg = core_real_mode >> 4;
+ /* Start of the lowmem heap */
+ __lowmem_heap = .;
/*
* 32-bit code. This is a hack for the moment due to the