+2004-02-16 Andrew Cagney <cagney@redhat.com>
+
+ * gdb.base/bigcore.exp: New file.
+ * gdb.base/bigcore.c: New file.
+
2004-02-13 Andrew Cagney <cagney@redhat.com>
* gdb.mi/mi1-basics.exp, gdb.mi/mi1-break.exp: Delete file.
--- /dev/null
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2004 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ Please email any bugs, comments, and/or additions to this file to:
+ bug-gdb@prep.ai.mit.edu */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+
+/* Print routines:
+
+ The following are provided so that printf et al. can be avoided. Those
+ might try to use malloc() and that, for this code, would be a
+ disaster. */
+
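+/* Any accidental call to printf now fails to compile ("do not use" is
+ not a valid expression), instead of possibly pulling malloc in behind
+ our backs. */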
+#define printf do not use
+
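+/* Digit table shared by the decimal and hex printers below. */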
+const char digit[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+
+static void
+print_char (char c)
+{
+ write (1, &c, sizeof (c));
+}
+
+static void
+print_unsigned (unsigned long u)
+{
+ if (u >= 10)
+ print_unsigned (u / 10);
+ print_char (digit[u % 10]);
+}
+
+static void
+print_hex (unsigned long u)
+{
+ if (u >= 16)
+ print_hex (u / 16);
+ print_char (digit[u % 16]);
+}
+
+static void
+print_string (const char *s)
+{
+ for (; (*s) != '\0'; s++)
+ print_char ((*s));
+}
+
+static void
+print_address (const void *a)
+{
+ print_string ("0x");
+ print_hex ((unsigned long) a);
+}
+
+/* Print the current values of RESOURCE. */
+
+static void
+print_rlimit (int resource)
+{
+ struct rlimit rl;
+ getrlimit (resource, &rl);
+ print_string ("cur=0x");
+ print_hex (rl.rlim_cur);
+ print_string (" max=0x");
+ print_hex (rl.rlim_max);
+}
+
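+/* Raise RESOURCE's soft limit to its hard limit, printing the limits
+ both before and after the change. */
+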
+static void
+maximize_rlimit (int resource, const char *prefix)
+{
+ struct rlimit rl;
+ print_string (" ");
+ print_string (prefix);
+ print_string (": ");
+ print_rlimit (resource);
+ getrlimit (resource, &rl);
+ rl.rlim_cur = rl.rlim_max;
+ setrlimit (resource, &rl);
+ print_string (" -> ");
+ print_rlimit (resource);
+ print_string ("\n");
+}
+
+/* Maintain a doubly linked list. */
+struct list
+{
+ struct list *next;
+ struct list *prev;
+ size_t size;
+};
+
+/* Put the "heap" in the DATA section. That way it is more likely
+ that the variable will occur early in the core file (an address
+ before the heap) and hence more likely that GDB will at least get
+ its value right.
+
+ To simplify the list append logic, start the heap out with one
+ entry (that lives in the BSS section). */
+
+static struct list dummy;
+static struct list heap = { &dummy, &dummy };
+
+int
+main ()
+{
+ size_t max_chunk_size;
+
+ /* Try to expand all the resource limits beyond the point of sanity
+ - we're after the biggest possible core file. */
+
+ print_string ("Maximize resource limits ...\n");
+#ifdef RLIMIT_CORE
+ maximize_rlimit (RLIMIT_CORE, "core");
+#endif
+#ifdef RLIMIT_DATA
+ maximize_rlimit (RLIMIT_DATA, "data");
+#endif
+#ifdef RLIMIT_STACK
+ maximize_rlimit (RLIMIT_STACK, "stack");
+#endif
+#ifdef RLIMIT_AS
+ maximize_rlimit (RLIMIT_AS, "as");
+#endif
+
+ /* Compute an initial chunk size. The math is dodgy but it works
+ for the moment. Perhaps there's a constant around somewhere. */
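+ /* (The loop below leaves MAX_CHUNK_SIZE holding the largest power of
+ two that fits in a size_t.) */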
+ {
+ size_t tmp;
+ for (tmp = 1; tmp > 0; tmp <<= 1)
+ max_chunk_size = tmp;
+ }
+
+ /* Allocate as much memory as possible, chaining each chunk onto a
+ linked list. The linking ensures that some, but not all, of the
+ memory is allocated. NB: Some kernels handle this efficiently -
+ only allocating and writing out referenced pages, leaving holes in
+ the file for unreferenced pages - while others handle this poorly -
+ writing out all pages, including those that weren't referenced. */
+
+ print_string ("Alocating the entire heap ...\n");
+ {
+ size_t chunk_size;
+ long bytes_allocated = 0;
+ long chunks_allocated = 0;
+ /* Create a linked list of memory chunks. Start with
+ MAX_CHUNK_SIZE blocks of memory and then try allocating smaller
+ and smaller amounts until all (well at least most) memory has
+ been allocated. */
+ for (chunk_size = max_chunk_size;
+ chunk_size >= sizeof (struct list);
+ chunk_size >>= 1)
+ {
+ unsigned long count = 0;
+ print_string (" ");
+ print_unsigned (chunk_size);
+ print_string (" bytes ... ");
+ while (1)
+ {
+ struct list *chunk = malloc (chunk_size);
+ if (chunk == NULL)
+ break;
+ chunk->size = chunk_size;
+ /* Link the new chunk in at the tail; heap.prev always points at
+ the most recently appended chunk. */
+ chunk->next = NULL;
+ chunk->prev = heap.prev;
+ heap.prev->next = chunk;
+ heap.prev = chunk;
+ count++;
+ }
+ print_unsigned (count);
+ print_string (" chunks\n");
+ chunks_allocated += count;
+ bytes_allocated += chunk_size * count;
+ }
+ print_string ("Total of ");
+ print_unsigned (bytes_allocated);
+ print_string (" bytes ");
+ print_unsigned (chunks_allocated);
+ print_string (" chunks\n");
+ }
+
+ /* Push everything out to disk. */
+
+ print_string ("Dump core ....\n");
+ *(char*)0 = 0;
+}
--- /dev/null
+# Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004
+# Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# Please email any bugs, comments, and/or additions to this file to:
+# bug-gdb@prep.ai.mit.edu
+
+# This file is based on corefile.exp which was written by Fred
+# Fish. (fnf@cygnus.com)
+
+if $tracelevel then {
+ strace $tracelevel
+}
+
+set prms_id 0
+set bug_id 0
+
+# Are we on a target board? As of 2004-02-12, GDB didn't have a
+# mechanism that would let it efficiently access a remote corefile.
+
+if ![isnative] then {
+ untested "Remote system"
+ return
+}
+
+# Can the system run this test (in particular support sparse
+# corefiles)? On systems that lack sparse corefile support this test
+# consumes too many resources - gigabytes worth of disk space and
+# I/O bandwidth.
+
+if { [istarget "*-*-netbsd*"] } {
+ untested "Kernel lacks sparse corefile support (PR gdb/1551)"
+ return
+}
+
+set testfile "bigcore"
+set srcfile ${testfile}.c
+set binfile ${objdir}/${subdir}/${testfile}
+set corefile ${objdir}/${subdir}/${testfile}.corefile
+
+if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
+ gdb_suppress_entire_file "Testcase compile failed, so all tests in this file will automatically fail."
+}
+
+# Create a core file named "TESTFILE.corefile" rather than just
+# "core", to avoid problems with sys admin types that like to
+# regularly prune all files named "core" from the system.
+
+# Some systems append "core" to the name of the program; others append
+# the name of the program to "core"; still others (like Linux, as of
+# May 2003) create cores named "core.PID". In the latter case, we
+# could have many core files lying around, and it may be difficult to
+# tell which one is ours, so let's run the program in a subdirectory.
+
+set found 0
+set coredir "${objdir}/${subdir}/coredir.[getpid]"
+file mkdir $coredir
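+# Run the program from inside the new directory so that its core file
+# lands there; the trailing "true" stops the crash's non-zero exit
+# status from making the whole command look like a failure.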
+catch "system \"(cd ${coredir}; ${binfile}; true) >/dev/null 2>&1\""
+set names [glob -nocomplain -directory $coredir *core*]
+if {[llength $names] == 1} {
+ set file [file join $coredir [lindex $names 0]]
+ remote_exec build "mv $file $corefile"
+ set found 1
+}
+
+# Try to clean up after ourselves.
+remote_file build delete [file join $coredir coremmap.data]
+remote_exec build "rmdir $coredir"
+
+if { $found == 0 } {
+ warning "can't generate a core file - core tests suppressed - check ulimit -c"
+ return 0
+}
+
+# Run GDB on the bigcore program up to where it will dump core.
+
+gdb_exit
+gdb_start
+gdb_reinitialize_dir $srcdir/$subdir
+gdb_load ${binfile}
+gdb_test "set print sevenbit-strings" "" \
+ "set print sevenbit-strings; ${testfile}"
+gdb_test "set width 0" "" \
+ "set width 0; ${testfile}"
+if { ![runto_main] } then {
+ gdb_suppress_tests;
+}
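+
+# Run up to the line that announces the core dump, then step onto the
+# statement that dereferences NULL and actually dumps the core.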
+set print_core_line [gdb_get_line_number "Dump core"]
+gdb_test "tbreak $print_core_line"
+gdb_test continue ".*print_string.*"
+gdb_test next ".*0 = 0.*"
+
+# Traverse part of bigcore's linked list of memory chunks (forward or
+# backward), saving each chunk's address. For whatever reason,
+# expect_out didn't work with gdb_test_multiple, so use gdb_expect
+# directly.
+
+proc extract_heap { dir } {
+ global gdb_prompt
+ global expect_out
+ set heap ""
+ set test "extract ${dir} heap"
+ set lim 0
+ send_gdb "print heap.${dir}\n"
+ gdb_expect {
+ -re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
+ pass "$test"
+ }
+ -re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
+ set heap [concat $heap $expect_out(1,string)]
+ if { $lim >= 50 } {
+ pass "$test (stop at $lim)"
+ } else {
+ incr lim
+ send_gdb "print \$.${dir}\n"
+ exp_continue
+ }
+ }
+ -re ".*$gdb_prompt $" {
+ fail "$test (entry $lim)"
+ }
+ timeout {
+ fail "$test (timeout)"
+ }
+ }
+ return $heap;
+}
+set next_heap [extract_heap next]
+set prev_heap [extract_heap prev]
+
+# Now load up that core file
+
+set test "load corefile"
+gdb_test_multiple "core $corefile" "$test" {
+ -re "A program is being debugged already. Kill it. .y or n. " {
+ send_gdb "y\n"
+ exp_continue
+ }
+ -re "Core was generated by.*$gdb_prompt $" {
+ pass "$test"
+ }
+}
+
+# Finally, re-traverse bigcore's linked list, checking each chunk's
+# address against the list saved from the executable. Don't use
+# gdb_test_multiple as we want only one pass/fail. Don't use
+# exp_continue as the regular expression involving $heap needs to be
+# re-evaluated for each new response.
+
+proc check_heap { dir heap } {
+ global gdb_prompt
+ set test "check ${dir} heap"
+ set ok 1
+ set lim 0
+ send_gdb "print heap.${dir}\n"
+ while { $ok } {
+ gdb_expect {
+ -re " = \\(struct list \\*\\) [lindex $heap $lim].*$gdb_prompt $" {
+ if { $lim >= [llength $heap] } {
+ pass "$test"
+ set ok 0
+ } else {
+ incr lim
+ send_gdb "print \$.${dir}\n"
+ }
+ }
+ -re ".*$gdb_prompt $" {
+ fail "$test (address [lindex $heap $lim])"
+ set ok 0
+ }
+ timeout {
+ fail "$test (timeout)"
+ set ok 0
+ }
+ }
+ }
+}
+
+check_heap next $next_heap
+check_heap prev $prev_heap