+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
+Fri Sep 17 10:24:45 1999 Tim Janik <timj@gtk.org>
+
+ * gmem.c (g_mem_chunk_compute_size) (g_mem_chunk_new): applied patch
+ from Soeren Sandmann <sandmann@daimi.au.dk>, to force mem chunk's area
+	sizes to be a multiple of atom_size, and to eliminate the MAX_MEM_AREA
+	restriction of 65536 bytes. we also catch cases where users pass an area
+	size < atom size with a return_if_fail statement now (which is ok,
+	because previously this led to memory corruption anyway).
+
Thu Sep 16 13:19:54 1999 Tim Janik <timj@gtk.org>
* glib.h (g_trash_stack_pop): add explicit (GTrashStack*) cast for NULL
#endif
-#define MAX_MEM_AREA 65536L
#define MEM_AREA_SIZE 4L
#if SIZEOF_VOID_P > SIZEOF_LONG
};
-static gulong g_mem_chunk_compute_size (gulong size);
+static gulong g_mem_chunk_compute_size (gulong size,
+ gulong min_size);
static gint g_mem_chunk_area_compare (GMemArea *a,
GMemArea *b);
static gint g_mem_chunk_area_search (GMemArea *a,
GRealMemChunk *mem_chunk;
gulong rarea_size;
+ g_return_val_if_fail (atom_size > 0, NULL);
+ g_return_val_if_fail (area_size >= atom_size, NULL);
+
ENTER_MEM_CHUNK_ROUTINE();
+ area_size = (area_size + atom_size - 1) / atom_size;
+ area_size *= atom_size;
+
mem_chunk = g_new (struct _GRealMemChunk, 1);
mem_chunk->name = name;
mem_chunk->type = type;
if (mem_chunk->atom_size % MEM_ALIGN)
mem_chunk->atom_size += MEM_ALIGN - (mem_chunk->atom_size % MEM_ALIGN);
-
- mem_chunk->area_size = area_size;
- if (mem_chunk->area_size > MAX_MEM_AREA)
- mem_chunk->area_size = MAX_MEM_AREA;
- while (mem_chunk->area_size < mem_chunk->atom_size)
- mem_chunk->area_size *= 2;
-
- rarea_size = mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
- rarea_size = g_mem_chunk_compute_size (rarea_size);
+
+ rarea_size = area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
+ rarea_size = g_mem_chunk_compute_size (rarea_size, atom_size + sizeof (GMemArea) - MEM_AREA_SIZE);
mem_chunk->area_size = rarea_size - (sizeof (GMemArea) - MEM_AREA_SIZE);
-
- /*
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- if (mem_chunk->area_size < mem_chunk->atom_size)
- {
- mem_chunk->area_size = (mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE) * 2;
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- }
-
- if (mem_chunk->area_size % mem_chunk->atom_size)
- mem_chunk->area_size += mem_chunk->atom_size - (mem_chunk->area_size % mem_chunk->atom_size);
- */
-
+
g_mutex_lock (mem_chunks_lock);
mem_chunk->next = mem_chunks;
mem_chunk->prev = NULL;
static gulong
-g_mem_chunk_compute_size (gulong size)
+g_mem_chunk_compute_size (gulong size,
+ gulong min_size)
{
gulong power_of_2;
gulong lower, upper;
lower = power_of_2 >> 1;
upper = power_of_2;
- if ((size - lower) < (upper - size))
+ if (size - lower < upper - size && lower >= min_size)
return lower;
- return upper;
+ else
+ return upper;
}
static gint
#endif
-#define MAX_MEM_AREA 65536L
#define MEM_AREA_SIZE 4L
#if SIZEOF_VOID_P > SIZEOF_LONG
};
-static gulong g_mem_chunk_compute_size (gulong size);
+static gulong g_mem_chunk_compute_size (gulong size,
+ gulong min_size);
static gint g_mem_chunk_area_compare (GMemArea *a,
GMemArea *b);
static gint g_mem_chunk_area_search (GMemArea *a,
GRealMemChunk *mem_chunk;
gulong rarea_size;
+ g_return_val_if_fail (atom_size > 0, NULL);
+ g_return_val_if_fail (area_size >= atom_size, NULL);
+
ENTER_MEM_CHUNK_ROUTINE();
+ area_size = (area_size + atom_size - 1) / atom_size;
+ area_size *= atom_size;
+
mem_chunk = g_new (struct _GRealMemChunk, 1);
mem_chunk->name = name;
mem_chunk->type = type;
if (mem_chunk->atom_size % MEM_ALIGN)
mem_chunk->atom_size += MEM_ALIGN - (mem_chunk->atom_size % MEM_ALIGN);
-
- mem_chunk->area_size = area_size;
- if (mem_chunk->area_size > MAX_MEM_AREA)
- mem_chunk->area_size = MAX_MEM_AREA;
- while (mem_chunk->area_size < mem_chunk->atom_size)
- mem_chunk->area_size *= 2;
-
- rarea_size = mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
- rarea_size = g_mem_chunk_compute_size (rarea_size);
+
+ rarea_size = area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
+ rarea_size = g_mem_chunk_compute_size (rarea_size, atom_size + sizeof (GMemArea) - MEM_AREA_SIZE);
mem_chunk->area_size = rarea_size - (sizeof (GMemArea) - MEM_AREA_SIZE);
-
- /*
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- if (mem_chunk->area_size < mem_chunk->atom_size)
- {
- mem_chunk->area_size = (mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE) * 2;
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- }
-
- if (mem_chunk->area_size % mem_chunk->atom_size)
- mem_chunk->area_size += mem_chunk->atom_size - (mem_chunk->area_size % mem_chunk->atom_size);
- */
-
+
g_mutex_lock (mem_chunks_lock);
mem_chunk->next = mem_chunks;
mem_chunk->prev = NULL;
static gulong
-g_mem_chunk_compute_size (gulong size)
+g_mem_chunk_compute_size (gulong size,
+ gulong min_size)
{
gulong power_of_2;
gulong lower, upper;
lower = power_of_2 >> 1;
upper = power_of_2;
- if ((size - lower) < (upper - size))
+ if (size - lower < upper - size && lower >= min_size)
return lower;
- return upper;
+ else
+ return upper;
}
static gint