+2003-07-05 Phil Edwards <pme@gcc.gnu.org>
+
+ * scripts/create_testsuite_files: New file.
+ * testsuite/Makefile.am (all-local, check-performance): Use it.
+ * testsuite/lib/libstdc++-v3-dg.exp (v3-computer-tests): Remove.
+ * testsuite/Makefile.in: Regenerated.
+
+ * testsuite/performance/filebuf_sputc.cc: Remove the temporary
+ files at the end.
+ * testsuite/performance/fstream_seek_write.cc: Likewise.
+ * testsuite/performance/ofstream_insert_float.cc: Likewise.
+ * testsuite/performance/ofstream_insert_int.cc: Likewise.
+ * testsuite/abi_check.cc (main): Nicer spacing in usage output.
+
2003-07-05 Gawain Bolton <gp.bolton@computer.org>
* include/bits/stl_list.h: Performance and memory usage
--- /dev/null
+#!/bin/sh
+
+# Constructs lists of source files (full pathnames) to test. Two
+# files are constructed: testsuite_files, which is used to test with
+# the default dg-runtest command, and testsuite_files_interactive,
+# which is used to test cases that require input to be entered. In
+# addition, both lists are pruned of wchar_t tests if the toolchain
+# under test does not support wchar_t functionality.
+#
+# We mimic the mkcheck script in that the first time this is run, all
+# existing files are listed in "testsuite_files" in the output
+# directory. Subsequent runs pull the list from that file, allowing
+# users to trim the list down to problematic tests, or just run
+# particular directories or sub-directories of tests.
+#
+# Selecting individual tests can also be done with RUNTESTFLAGS, but
+# that doesn't really do all that we are trying to accomplish here.
+
+# Force a stable sort order regardless of the user's locale.
+LC_ALL=C
+export LC_ALL
+
+# Both of these are in the appropriate testsuite subdirectories.
+srcdir="$1"
+outdir="$2"
+
+tmp="${TMPDIR:-/tmp}/ctt$$"
+tests_file_normal="$outdir/testsuite_files"
+tests_file_inter="$outdir/testsuite_files_interactive"
+tests_file_perf="$outdir/testsuite_files_performance"
+
+# Bail out if the cd fails; otherwise the find/grep/rm below would all
+# operate in whatever directory we happened to be invoked from.
+cd "$srcdir" || exit 1
+
+# This is the ugly version of "everything but the current directory". It's
+# what has to happen when find(1) doesn't support -mindepth.
+dlist=`echo [0-9][0-9]*`
+for d in [a-z]*; do
+  test -d "$d" && dlist="$dlist $d"
+done
+# $dlist is deliberately unquoted: it is a space-separated list of
+# subdirectory names to be word-split into find's arguments.
+find $dlist -type f -name "*.cc" | sort > "$tmp.1"
+
+# If the library is not configured to support wchar_t, don't run those tests.
+if test -f "$outdir/testsuite_wchar_t"; then
+  mv "$tmp.1" "$tmp.2"
+else
+  grep -v wchar_t "$tmp.1" > "$tmp.2"
+fi
+
+# Now filter out classes of tests. These classes are run using special rules.
+grep _xin "$tmp.2" > "$tests_file_inter"
+grep -v _xin "$tmp.2" > "$tmp.3"
+
+grep performance "$tmp.3" > "$tests_file_perf"
+grep -v performance "$tmp.3" > "$tmp.4"
+
+# ...more filters go here.
+cp "$tmp.4" "$tests_file_normal"
+
+# Clean up all of our $tmp.N scratch files.
+rm -f "$tmp".*
+exit 0
AM_MAKEFLAGS = -j1
AM_RUNTESTFLAGS =
RUNTESTFLAGS =
+PWD_COMMAND = $${PWDCMD-pwd}
## CXX is actually a "C" compiler. These are real C++ programs.
glibcxx_srcdir=@glibcxx_srcdir@
-nostdinc++ \
@GLIBCXX_INCLUDES@ @LIBSUPCXX_INCLUDES@ @TOPLEVEL_INCLUDES@
+# Generated lists of files to run. All of these names are valid make
+# targets, if you wish to generate a list manually.
+lists_of_files = \
+ testsuite_files \
+ testsuite_files_interactive \
+ testsuite_files_performance
+
## Build support library.
noinst_LIBRARIES = libv3test.a
libv3test_a_SOURCES = testsuite_hooks.cc testsuite_allocator.cc
endif
abi_check_SOURCES = abi_check.cc
-all-local: stamp_wchar
+all-local: stamp_wchar testsuite_files
# Enable wchar_t tests if capable.
if GLIBCXX_TEST_WCHAR_T
# ammounts of memory, or otherwise tie up machine resources. Thus,
# running this is off by default.
performance_script=${glibcxx_srcdir}/scripts/check_performance
-check-performance: ${performance_script}
+check-performance: testsuite_files_performance ${performance_script}
-@(chmod + ${performance_script}; \
${performance_script} ${glibcxx_srcdir} ${glibcxx_builddir})
+
+# This rule generates all of the testsuite_files* lists at once.
+${lists_of_files}:
+ ${glibcxx_srcdir}/scripts/create_testsuite_files \
+ ${glibcxx_srcdir}/testsuite `${PWD_COMMAND}`
+
+
.PHONY: baseline_symbols new-abi-baseline check-abi check-abi-verbose \
check-script check-script-install check-performance
DEJATOOL = libstdc++-v3
-EXPECT = `if [ -f @glibcxx_builddir@/../../expect/expect ] ; then \
- echo @glibcxx_builddir@/../../expect/expect ; \
- else echo expect ; fi`
+EXPECT = `if [ -f @glibcxx_builddir@/../../expect/expect ] ; then echo @glibcxx_builddir@/../../expect/expect ; else echo expect ; fi`
-RUNTEST = `if [ -f @glibcxx_srcdir@/../dejagnu/runtest ] ; then \
- echo @glibcxx_srcdir@/../dejagnu/runtest ; \
- else echo runtest; fi`
+RUNTEST = `if [ -f @glibcxx_srcdir@/../dejagnu/runtest ] ; then echo @glibcxx_srcdir@/../dejagnu/runtest ; else echo runtest; fi`
AM_MAKEFLAGS = -j1
AM_RUNTESTFLAGS =
RUNTESTFLAGS =
+PWD_COMMAND = $${PWDCMD-pwd}
glibcxx_srcdir = @glibcxx_srcdir@
glibcxx_builddir = @glibcxx_builddir@
testsuite_flags_script = ${glibcxx_builddir}/scripts/testsuite_flags
CXX = `${testsuite_flags_script} --build-cxx`
-CXXLINK = \
- LD_RUN_PATH=$${LD_RUN_PATH:+$$LD_RUN_PATH:}${glibcxx_builddir}/src/.libs\
- $(LIBTOOL) --tag=CXX --mode=link $(CXX) \
- $(AM_CXXFLAGS) $(CXXFLAGS) $(LDFLAGS) -o $@
+CXXLINK = LD_RUN_PATH=$${LD_RUN_PATH:+$$LD_RUN_PATH:}${glibcxx_builddir}/src/.libs $(LIBTOOL) --tag=CXX --mode=link $(CXX) $(AM_CXXFLAGS) $(CXXFLAGS) $(LDFLAGS) -o $@
GLIBCXX_INCLUDES = @GLIBCXX_INCLUDES@
LIBSUPCXX_INCLUDES = @LIBSUPCXX_INCLUDES@
TOPLEVEL_INCLUDES = @TOPLEVEL_INCLUDES@
-INCLUDES = \
- -nostdinc++ \
- @GLIBCXX_INCLUDES@ @LIBSUPCXX_INCLUDES@ @TOPLEVEL_INCLUDES@
+INCLUDES = -nostdinc++ @GLIBCXX_INCLUDES@ @LIBSUPCXX_INCLUDES@ @TOPLEVEL_INCLUDES@
+
+
+# Generated lists of files to run. All of these names are valid make
+# targets, if you wish to generate a list manually.
+lists_of_files = testsuite_files testsuite_files_interactive testsuite_files_performance
noinst_LIBRARIES = libv3test.a
libv3test_a_SOURCES = testsuite_hooks.cc testsuite_allocator.cc
-@GLIBCXX_TEST_ABI_TRUE@noinst_PROGRAMS = @GLIBCXX_TEST_ABI_TRUE@abi_check
+@GLIBCXX_TEST_ABI_TRUE@noinst_PROGRAMS = abi_check
@GLIBCXX_TEST_ABI_FALSE@noinst_PROGRAMS =
abi_check_SOURCES = abi_check.cc
performance_script = ${glibcxx_srcdir}/scripts/check_performance
# By adding these files here, automake will remove them for 'make clean'
-CLEANFILES = *.txt *.tst *.exe core* filebuf_* tmp* ostream_* *.log *.sum \
- testsuite_* site.exp abi_check baseline_symbols
+CLEANFILES = *.txt *.tst *.exe core* filebuf_* tmp* ostream_* *.log *.sum testsuite_* site.exp abi_check baseline_symbols
CONFIG_HEADER = ../config.h
CONFIG_CLEAN_FILES =
DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
-TAR = gtar
+TAR = tar
GZIP_ENV = --best
SOURCES = $(libv3test_a_SOURCES) $(abi_check_SOURCES)
OBJECTS = $(libv3test_a_OBJECTS) $(abi_check_OBJECTS)
awk ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
- || (cd $(srcdir) && etags $(ETAGS_ARGS) $$tags $$unique $(LISP) -o $$here/TAGS)
+ || (cd $(srcdir) && etags -o $$here/TAGS $(ETAGS_ARGS) $$tags $$unique $(LISP))
mostlyclean-tags:
@echo 'set build_alias $(build_alias)' >> $@-t
@echo 'set build_triplet $(build_triplet)' >> $@-t
@echo '## All variables above are generated by configure. Do Not Edit ##' >> $@-t
- @test ! -f site.exp || sed '1,/^## All variables above are.*##/ d' site.exp >> $@-t
+ @test ! -f $(srcdir)/site.exp || sed '1,/^## All variables above are.*##/ d' $(srcdir)/site.exp >> $@-t
@test ! -f site.exp || mv site.exp site.bak
@mv $@-t site.exp
info-am:
mostlyclean distclean maintainer-clean
-all-local: stamp_wchar
+all-local: stamp_wchar testsuite_files
# Enable wchar_t tests if capable.
@GLIBCXX_TEST_WCHAR_T_TRUE@stamp_wchar:
check-script-install: ${survey_script}
-@(chmod + ${survey_script}; \
${survey_script} 0)
-check-performance: ${performance_script}
+check-performance: testsuite_files_performance ${performance_script}
-@(chmod + ${performance_script}; \
${performance_script} ${glibcxx_srcdir} ${glibcxx_builddir})
+# This rule generates all of the testsuite_files* lists at once.
+${lists_of_files}:
+ ${glibcxx_srcdir}/scripts/create_testsuite_files \
+ ${glibcxx_srcdir}/testsuite `${PWD_COMMAND}`
+
.PHONY: baseline_symbols new-abi-baseline check-abi check-abi-verbose \
check-script check-script-install check-performance
string argv1 = argc > 1 ? argv[1] : "";
if (argv1 == "--help" || argc < 4)
{
- cerr << "usage: abi_check --check current baseline\n"
+ cerr << "usage: abi_check --check current baseline\n"
" --check-verbose current baseline\n"
" --help\n\n"
"Where CURRENT is a file containing the current results from\n"
libstdc++-v3-copy-files [glob -nocomplain "$srcdir/data/*.tst"] $outdir
libstdc++-v3-copy-files [glob -nocomplain "$srcdir/data/*.txt"] $outdir
- # Compute lists of files to test.
- v3-compute-tests
-
# set LD_LIBRARY_PATH so that libgcc_s, libstdc++ binaries can be found.
# locate libgcc.a so we don't need to account for different values of
# SHLIB_EXT on different platforms
}
-# Constructs lists of source files (full pathnames) to test. Two
-# files are constructed: testsuite_files, which is used to test with
-# the default dg-runtest command, and testsuite_files_interactive,
-# which is used to test cases that require input to be entered. In
-# addition, both lists are pruned of wchar_t tests if the toolchain
-# under test does not support wchar_t functionality.
-#
-# We mimic the mkcheck script in that the first time this is run, all
-# existing files are listed in "testsuite_files" in the output
-# directory. Subsequent runs pull the list from that file, allowing
-# users to trim the list down to problematic tests, or just run
-# paticular directories or sub-directories of tests.
-#
-# Selecting individual tests can also be done with RUNTESTFLAGS, but
-# that doesn't really do all that we are trying to accomplish here.
-#
-# Essentially "static" to this file.
-proc v3-compute-tests { } {
- global srcdir
- global outdir
-
- set tests_file "${outdir}/testsuite_files"
- set tests_file_inter "${outdir}/testsuite_files_interactive"
- set tests_file_perf "${outdir}/testsuite_files_performance"
- set sfiles ""
-
- # If there is a testsuite_file, use it.
- if { [file exists $tests_file] } {
- return
- }
-
- # If not, generate it.
- set where_we_were [pwd]
- cd $srcdir
- foreach s [lsort [glob -nocomplain "*/*.cc" "*/*/*.cc" "{,*/}*/*/*/*.cc" ]] {
- lappend sfiles ${s}
- }
- cd $where_we_were
-
- # Filter wchar_t tests.
- # Disable wchar_t tests if library not configured to support
- # wchar_t testing.
- set wchar_file "${outdir}/testsuite_wchar_t"
- if { ! [file exists $wchar_file] } {
- # Remove wchar_t tests files from list.
- set res {}
- foreach w $sfiles {
- if [regexp "wchar_t" $w] {
- verbose "element wchar list is $w"
- } else {
- verbose "element non-wchar list is $w"
- lappend res $w
- }
- }
- set sfiles ${res}
- }
-
- # Filter interactive tests.
- # Use special rules to run these tests.
- set restwo {}
- set sfiles_inter {}
- foreach i $sfiles {
- if [regexp "_xin" $i] {
- verbose "element interactive list is $i"
- lappend sfiles_inter $i
- } else {
- verbose "element non-interactive list is $i"
- lappend restwo $i
- }
- }
- set sfiles ${restwo}
-
- # Filter performance analysis tests.
- # Use special rules to run these tests.
- set resthree {}
- set sfiles_perf {}
- foreach i $sfiles {
- if [regexp "performance" $i] {
- verbose "element performance list is $i"
- lappend sfiles_perf $i
- } else {
- verbose "element non-performancet list is $i"
- lappend resthree $i
- }
- }
- set sfiles ${resthree}
-
- # Write out testsuite_files.
- set f [open $tests_file "w"]
- foreach t $sfiles {
- puts $f $t
- }
- close $f
-
- # Write out testsuite_files_interactive.
- set f [open $tests_file_inter "w"]
- foreach t $sfiles_inter {
- puts $f $t
- }
- close $f
-
- # Write out testsuite_files_performance.
- set f [open $tests_file_perf "w"]
- foreach t $sfiles_perf {
- puts $f $t
- }
- close $f
-}
-
# Called once, from libstdc++-v3.dg/dg.exp.
proc v3-list-tests { filename } {
global srcdir
stop_counters(time, resource);
report_performance(__FILE__, "C++", time, resource);
+ unlink("tmp");
return 0;
}
report_performance(__FILE__, "", time, resource);
}
+ unlink("tmp_perf_seek");
return 0;
}
stop_counters(time, resource);
report_performance(__FILE__, "", time, resource);
+ unlink("tmp_perf_float.txt");
return 0;
};
stop_counters(time, resource);
report_performance(__FILE__, "", time, resource);
+ unlink("tmp_perf_int.txt");
return 0;
};