Imported Upstream version 1.1.2 upstream upstream/1.1.2
author: Anas Nashif <anas.nashif@intel.com>
Fri, 9 Nov 2012 20:21:03 +0000 (12:21 -0800)
committer: Anas Nashif <anas.nashif@intel.com>
Fri, 9 Nov 2012 20:21:03 +0000 (12:21 -0800)
874 files changed:
AUTHORS [new file with mode: 0644]
CHANGELOG [new file with mode: 0644]
NEWS [new file with mode: 0644]
PKG-INFO [new file with mode: 0644]
README.txt [new file with mode: 0644]
bin/nosetests [new file with mode: 0755]
distribute_setup.py [new file with mode: 0644]
doc/.static/nose.css [new file with mode: 0644]
doc/.templates/index.html [new file with mode: 0644]
doc/.templates/indexsidebar.html [new file with mode: 0644]
doc/.templates/layout.html [new file with mode: 0644]
doc/.templates/page.html [new file with mode: 0644]
doc/Makefile [new file with mode: 0644]
doc/api.rst [new file with mode: 0644]
doc/api/commands.rst [new file with mode: 0644]
doc/api/config.rst [new file with mode: 0644]
doc/api/core.rst [new file with mode: 0644]
doc/api/importer.rst [new file with mode: 0644]
doc/api/inspector.rst [new file with mode: 0644]
doc/api/loader.rst [new file with mode: 0644]
doc/api/plugin_manager.rst [new file with mode: 0644]
doc/api/proxy.rst [new file with mode: 0644]
doc/api/result.rst [new file with mode: 0644]
doc/api/selector.rst [new file with mode: 0644]
doc/api/suite.rst [new file with mode: 0644]
doc/api/test_cases.rst [new file with mode: 0644]
doc/api/twistedtools.rst [new file with mode: 0644]
doc/api/util.rst [new file with mode: 0644]
doc/conf.py [new file with mode: 0644]
doc/contributing.rst [new file with mode: 0644]
doc/developing.rst [new file with mode: 0644]
doc/doc_tests/test_addplugins/support/test$py.class [new file with mode: 0644]
doc/doc_tests/test_addplugins/support/test.py [new file with mode: 0644]
doc/doc_tests/test_addplugins/support/test.pyc [new file with mode: 0644]
doc/doc_tests/test_addplugins/test_addplugins.rst [new file with mode: 0644]
doc/doc_tests/test_allmodules/support/mod$py.class [new file with mode: 0644]
doc/doc_tests/test_allmodules/support/mod.py [new file with mode: 0644]
doc/doc_tests/test_allmodules/support/mod.pyc [new file with mode: 0644]
doc/doc_tests/test_allmodules/support/test$py.class [new file with mode: 0644]
doc/doc_tests/test_allmodules/support/test.py [new file with mode: 0644]
doc/doc_tests/test_allmodules/support/test.pyc [new file with mode: 0644]
doc/doc_tests/test_allmodules/test_allmodules.rst [new file with mode: 0644]
doc/doc_tests/test_coverage_html/coverage_html.rst [new file with mode: 0644]
doc/doc_tests/test_coverage_html/coverage_html.rst.py3.patch [new file with mode: 0644]
doc/doc_tests/test_coverage_html/coverage_html_fixtures$py.class [new file with mode: 0644]
doc/doc_tests/test_coverage_html/coverage_html_fixtures.py [new file with mode: 0644]
doc/doc_tests/test_coverage_html/coverage_html_fixtures.pyc [new file with mode: 0644]
doc/doc_tests/test_coverage_html/support/blah.py [new file with mode: 0644]
doc/doc_tests/test_coverage_html/support/blah.pyc [new file with mode: 0644]
doc/doc_tests/test_coverage_html/support/tests/test_covered.py [new file with mode: 0644]
doc/doc_tests/test_coverage_html/support/tests/test_covered.pyc [new file with mode: 0644]
doc/doc_tests/test_doctest_fixtures/doctest_fixtures.rst [new file with mode: 0644]
doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class [new file with mode: 0644]
doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.py [new file with mode: 0644]
doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc [new file with mode: 0644]
doc/doc_tests/test_init_plugin/example.cfg [new file with mode: 0644]
doc/doc_tests/test_init_plugin/init_plugin.rst [new file with mode: 0644]
doc/doc_tests/test_init_plugin/init_plugin.rst.py3.patch [new file with mode: 0644]
doc/doc_tests/test_issue089/support/unwanted_package/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_issue089/support/unwanted_package/__init__.py [new file with mode: 0644]
doc/doc_tests/test_issue089/support/unwanted_package/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class [new file with mode: 0644]
doc/doc_tests/test_issue089/support/unwanted_package/test_spam.py [new file with mode: 0644]
doc/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc [new file with mode: 0644]
doc/doc_tests/test_issue089/support/wanted_package/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_issue089/support/wanted_package/__init__.py [new file with mode: 0644]
doc/doc_tests/test_issue089/support/wanted_package/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class [new file with mode: 0644]
doc/doc_tests/test_issue089/support/wanted_package/test_eggs.py [new file with mode: 0644]
doc/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc [new file with mode: 0644]
doc/doc_tests/test_issue089/unwanted_package.rst [new file with mode: 0644]
doc/doc_tests/test_issue097/plugintest_environment.rst [new file with mode: 0644]
doc/doc_tests/test_issue107/plugin_exceptions.rst [new file with mode: 0644]
doc/doc_tests/test_issue107/support/test_spam$py.class [new file with mode: 0644]
doc/doc_tests/test_issue107/support/test_spam.py [new file with mode: 0644]
doc/doc_tests/test_issue107/support/test_spam.pyc [new file with mode: 0644]
doc/doc_tests/test_issue119/empty_plugin.rst [new file with mode: 0644]
doc/doc_tests/test_issue119/test_zeronine$py.class [new file with mode: 0644]
doc/doc_tests/test_issue119/test_zeronine.py [new file with mode: 0644]
doc/doc_tests/test_issue119/test_zeronine.pyc [new file with mode: 0644]
doc/doc_tests/test_issue142/errorclass_failure.rst [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_failing_test$py.class [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_failing_test.py [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_failing_test.pyc [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_failure_plugin.py [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_tests$py.class [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_tests.py [new file with mode: 0644]
doc/doc_tests/test_issue142/support/errorclass_tests.pyc [new file with mode: 0644]
doc/doc_tests/test_issue145/imported_tests.rst [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package1/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package1/__init__.py [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package1/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package1/test_module$py.class [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package1/test_module.py [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package1/test_module.pyc [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2c/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2c/__init__.py [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2c/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2c/test_module$py.class [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2c/test_module.py [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2c/test_module.pyc [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2f/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2f/__init__.py [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2f/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2f/test_module$py.class [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2f/test_module.py [new file with mode: 0644]
doc/doc_tests/test_issue145/support/package2f/test_module.pyc [new file with mode: 0644]
doc/doc_tests/test_multiprocess/multiprocess.rst [new file with mode: 0644]
doc/doc_tests/test_multiprocess/multiprocess_fixtures$py.class [new file with mode: 0644]
doc/doc_tests/test_multiprocess/multiprocess_fixtures.py [new file with mode: 0644]
doc/doc_tests/test_multiprocess/multiprocess_fixtures.pyc [new file with mode: 0644]
doc/doc_tests/test_multiprocess/support/test_can_split.py [new file with mode: 0644]
doc/doc_tests/test_multiprocess/support/test_can_split.pyc [new file with mode: 0644]
doc/doc_tests/test_multiprocess/support/test_not_shared.py [new file with mode: 0644]
doc/doc_tests/test_multiprocess/support/test_not_shared.pyc [new file with mode: 0644]
doc/doc_tests/test_multiprocess/support/test_shared.py [new file with mode: 0644]
doc/doc_tests/test_multiprocess/support/test_shared.pyc [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst.py3.patch [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/support/bad.cfg [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/support/start.cfg [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/support/test$py.class [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/support/test.py [new file with mode: 0644]
doc/doc_tests/test_restricted_plugin_options/support/test.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/selector_plugin.rst [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mymodule$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mymodule.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mymodule.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/__init__.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/strings$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/strings.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/mypackage/strings.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/math/basic$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/math/basic.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/math/basic.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/strings/cat.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/testlib$py.class [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/testlib.py [new file with mode: 0644]
doc/doc_tests/test_selector_plugin/support/tests/testlib.pyc [new file with mode: 0644]
doc/doc_tests/test_xunit_plugin/support/nosetests.xml [new file with mode: 0644]
doc/doc_tests/test_xunit_plugin/support/test_skip$py.class [new file with mode: 0644]
doc/doc_tests/test_xunit_plugin/support/test_skip.py [new file with mode: 0644]
doc/doc_tests/test_xunit_plugin/support/test_skip.pyc [new file with mode: 0644]
doc/doc_tests/test_xunit_plugin/test_skips.rst [new file with mode: 0644]
doc/docstring.py [new file with mode: 0644]
doc/finding_tests.rst [new file with mode: 0644]
doc/further_reading.rst [new file with mode: 0644]
doc/index.html [new file with mode: 0644]
doc/index.rst [new file with mode: 0644]
doc/man.rst [new file with mode: 0644]
doc/manbuilder.py [new file with mode: 0644]
doc/manbuilder.pyc [new file with mode: 0644]
doc/manpage.py [new file with mode: 0644]
doc/manpage.pyc [new file with mode: 0644]
doc/more_info.rst [new file with mode: 0644]
doc/news.rst [new file with mode: 0644]
doc/plugins.rst [new file with mode: 0644]
doc/plugins/allmodules.rst [new file with mode: 0644]
doc/plugins/attrib.rst [new file with mode: 0644]
doc/plugins/builtin.rst [new file with mode: 0644]
doc/plugins/capture.rst [new file with mode: 0644]
doc/plugins/collect.rst [new file with mode: 0644]
doc/plugins/cover.rst [new file with mode: 0644]
doc/plugins/debug.rst [new file with mode: 0644]
doc/plugins/deprecated.rst [new file with mode: 0644]
doc/plugins/doctests.rst [new file with mode: 0644]
doc/plugins/documenting.rst [new file with mode: 0644]
doc/plugins/errorclasses.rst [new file with mode: 0644]
doc/plugins/failuredetail.rst [new file with mode: 0644]
doc/plugins/interface.rst [new file with mode: 0644]
doc/plugins/isolate.rst [new file with mode: 0644]
doc/plugins/logcapture.rst [new file with mode: 0644]
doc/plugins/multiprocess.rst [new file with mode: 0644]
doc/plugins/other.rst [new file with mode: 0644]
doc/plugins/prof.rst [new file with mode: 0644]
doc/plugins/skip.rst [new file with mode: 0644]
doc/plugins/testid.rst [new file with mode: 0644]
doc/plugins/testing.rst [new file with mode: 0644]
doc/plugins/writing.rst [new file with mode: 0644]
doc/plugins/xunit.rst [new file with mode: 0644]
doc/rtd-requirements.txt [new file with mode: 0644]
doc/setuptools_integration.rst [new file with mode: 0644]
doc/testing.rst [new file with mode: 0644]
doc/testing_tools.rst [new file with mode: 0644]
doc/usage.rst [new file with mode: 0644]
doc/writing_tests.rst [new file with mode: 0644]
examples/attrib_plugin.py [new file with mode: 0644]
examples/html_plugin/htmlplug.py [new file with mode: 0644]
examples/html_plugin/setup.py [new file with mode: 0644]
examples/plugin/plug.py [new file with mode: 0644]
examples/plugin/setup.py [new file with mode: 0644]
functional_tests/doc_tests/test_addplugins/support/test$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_addplugins/support/test.py [new file with mode: 0644]
functional_tests/doc_tests/test_addplugins/support/test.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_addplugins/test_addplugins.rst [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/support/mod$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/support/mod.py [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/support/mod.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/support/test$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/support/test.py [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/support/test.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_allmodules/test_allmodules.rst [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/coverage_html.rst [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/coverage_html.rst.py3.patch [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.py [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/support/blah.py [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/support/blah.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.py [new file with mode: 0644]
functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures.rst [new file with mode: 0644]
functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.py [new file with mode: 0644]
functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_init_plugin/example.cfg [new file with mode: 0644]
functional_tests/doc_tests/test_init_plugin/init_plugin.rst [new file with mode: 0644]
functional_tests/doc_tests/test_init_plugin/init_plugin.rst.py3.patch [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/wanted_package/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue089/unwanted_package.rst [new file with mode: 0644]
functional_tests/doc_tests/test_issue097/plugintest_environment.rst [new file with mode: 0644]
functional_tests/doc_tests/test_issue107/plugin_exceptions.rst [new file with mode: 0644]
functional_tests/doc_tests/test_issue107/support/test_spam$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue107/support/test_spam.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue107/support/test_spam.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue119/empty_plugin.rst [new file with mode: 0644]
functional_tests/doc_tests/test_issue119/test_zeronine$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue119/test_zeronine.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue119/test_zeronine.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/errorclass_failure.rst [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_failing_test$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_tests$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_tests.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue142/support/errorclass_tests.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/imported_tests.rst [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package1/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package1/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package1/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package1/test_module$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package1/test_module.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package1/test_module.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2c/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2c/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2c/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2c/test_module$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2c/test_module.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2c/test_module.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2f/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2f/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2f/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2f/test_module$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2f/test_module.py [new file with mode: 0644]
functional_tests/doc_tests/test_issue145/support/package2f/test_module.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/multiprocess.rst [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.py [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/support/test_can_split.py [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/support/test_can_split.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/support/test_not_shared.py [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/support/test_not_shared.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/support/test_shared.py [new file with mode: 0644]
functional_tests/doc_tests/test_multiprocess/support/test_shared.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst.py3.patch [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/support/bad.cfg [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/support/start.cfg [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/support/test$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/support/test.py [new file with mode: 0644]
functional_tests/doc_tests/test_restricted_plugin_options/support/test.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/selector_plugin.rst [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mymodule$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mymodule.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mymodule.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/testlib$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.py [new file with mode: 0644]
functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_xunit_plugin/support/nosetests.xml [new file with mode: 0644]
functional_tests/doc_tests/test_xunit_plugin/support/test_skip$py.class [new file with mode: 0644]
functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py [new file with mode: 0644]
functional_tests/doc_tests/test_xunit_plugin/support/test_skip.pyc [new file with mode: 0644]
functional_tests/doc_tests/test_xunit_plugin/test_skips.rst [new file with mode: 0644]
functional_tests/support/att/test_attr$py.class [new file with mode: 0644]
functional_tests/support/att/test_attr.py [new file with mode: 0644]
functional_tests/support/att/test_attr.pyc [new file with mode: 0644]
functional_tests/support/ctx/mod_import_skip$py.class [new file with mode: 0644]
functional_tests/support/ctx/mod_import_skip.py [new file with mode: 0644]
functional_tests/support/ctx/mod_import_skip.pyc [new file with mode: 0644]
functional_tests/support/ctx/mod_setup_fails$py.class [new file with mode: 0644]
functional_tests/support/ctx/mod_setup_fails.py [new file with mode: 0644]
functional_tests/support/ctx/mod_setup_fails.pyc [new file with mode: 0644]
functional_tests/support/ctx/mod_setup_skip$py.class [new file with mode: 0644]
functional_tests/support/ctx/mod_setup_skip.py [new file with mode: 0644]
functional_tests/support/ctx/mod_setup_skip.pyc [new file with mode: 0644]
functional_tests/support/dir1/mod$py.class [new file with mode: 0644]
functional_tests/support/dir1/mod.py [new file with mode: 0644]
functional_tests/support/dir1/mod.pyc [new file with mode: 0644]
functional_tests/support/dir1/pak/__init__$py.class [new file with mode: 0644]
functional_tests/support/dir1/pak/__init__.py [new file with mode: 0644]
functional_tests/support/dir1/pak/__init__.pyc [new file with mode: 0644]
functional_tests/support/dir1/pak/mod$py.class [new file with mode: 0644]
functional_tests/support/dir1/pak/mod.py [new file with mode: 0644]
functional_tests/support/dir1/pak/mod.pyc [new file with mode: 0644]
functional_tests/support/dir1/pak/sub/__init__$py.class [new file with mode: 0644]
functional_tests/support/dir1/pak/sub/__init__.py [new file with mode: 0644]
functional_tests/support/dir1/pak/sub/__init__.pyc [new file with mode: 0644]
functional_tests/support/dir2/mod$py.class [new file with mode: 0644]
functional_tests/support/dir2/mod.py [new file with mode: 0644]
functional_tests/support/dir2/mod.pyc [new file with mode: 0644]
functional_tests/support/dir2/pak/__init__$py.class [new file with mode: 0644]
functional_tests/support/dir2/pak/__init__.py [new file with mode: 0644]
functional_tests/support/dir2/pak/__init__.pyc [new file with mode: 0644]
functional_tests/support/dir2/pak/mod$py.class [new file with mode: 0644]
functional_tests/support/dir2/pak/mod.py [new file with mode: 0644]
functional_tests/support/dir2/pak/mod.pyc [new file with mode: 0644]
functional_tests/support/dir2/pak/sub/__init__$py.class [new file with mode: 0644]
functional_tests/support/dir2/pak/sub/__init__.py [new file with mode: 0644]
functional_tests/support/dir2/pak/sub/__init__.pyc [new file with mode: 0644]
functional_tests/support/dtt/docs/doc.txt [new file with mode: 0644]
functional_tests/support/dtt/docs/errdoc.txt [new file with mode: 0644]
functional_tests/support/dtt/docs/nodoc.txt [new file with mode: 0644]
functional_tests/support/dtt/some_mod$py.class [new file with mode: 0644]
functional_tests/support/dtt/some_mod.py [new file with mode: 0644]
functional_tests/support/dtt/some_mod.pyc [new file with mode: 0644]
functional_tests/support/empty/.hidden [new file with mode: 0644]
functional_tests/support/ep/Some_plugin.egg-info/PKG-INFO [new file with mode: 0644]
functional_tests/support/ep/Some_plugin.egg-info/SOURCES.txt [new file with mode: 0644]
functional_tests/support/ep/Some_plugin.egg-info/dependency_links.txt [new file with mode: 0644]
functional_tests/support/ep/Some_plugin.egg-info/entry_points.txt [new file with mode: 0644]
functional_tests/support/ep/Some_plugin.egg-info/top_level.txt [new file with mode: 0644]
functional_tests/support/ep/setup.py [new file with mode: 0644]
functional_tests/support/ep/someplugin.py [new file with mode: 0644]
functional_tests/support/fdp/test_fdp$py.class [new file with mode: 0644]
functional_tests/support/fdp/test_fdp.py [new file with mode: 0644]
functional_tests/support/fdp/test_fdp.pyc [new file with mode: 0644]
functional_tests/support/fdp/test_fdp_no_capt$py.class [new file with mode: 0644]
functional_tests/support/fdp/test_fdp_no_capt.py [new file with mode: 0644]
functional_tests/support/fdp/test_fdp_no_capt.pyc [new file with mode: 0644]
functional_tests/support/gen/test$py.class [new file with mode: 0644]
functional_tests/support/gen/test.py [new file with mode: 0644]
functional_tests/support/gen/test.pyc [new file with mode: 0644]
functional_tests/support/id_fails/test_a$py.class [new file with mode: 0644]
functional_tests/support/id_fails/test_a.py [new file with mode: 0644]
functional_tests/support/id_fails/test_a.pyc [new file with mode: 0644]
functional_tests/support/id_fails/test_b$py.class [new file with mode: 0644]
functional_tests/support/id_fails/test_b.py [new file with mode: 0644]
functional_tests/support/id_fails/test_b.pyc [new file with mode: 0644]
functional_tests/support/idp/exm$py.class [new file with mode: 0644]
functional_tests/support/idp/exm.py [new file with mode: 0644]
functional_tests/support/idp/exm.pyc [new file with mode: 0644]
functional_tests/support/idp/tests$py.class [new file with mode: 0644]
functional_tests/support/idp/tests.py [new file with mode: 0644]
functional_tests/support/idp/tests.pyc [new file with mode: 0644]
functional_tests/support/ipt/test1/ipthelp$py.class [new file with mode: 0644]
functional_tests/support/ipt/test1/ipthelp.py [new file with mode: 0644]
functional_tests/support/ipt/test1/ipthelp.pyc [new file with mode: 0644]
functional_tests/support/ipt/test1/tests$py.class [new file with mode: 0644]
functional_tests/support/ipt/test1/tests.py [new file with mode: 0644]
functional_tests/support/ipt/test1/tests.pyc [new file with mode: 0644]
functional_tests/support/ipt/test2/ipthelp$py.class [new file with mode: 0644]
functional_tests/support/ipt/test2/ipthelp.py [new file with mode: 0644]
functional_tests/support/ipt/test2/ipthelp.pyc [new file with mode: 0644]
functional_tests/support/ipt/test2/tests$py.class [new file with mode: 0644]
functional_tests/support/ipt/test2/tests.py [new file with mode: 0644]
functional_tests/support/ipt/test2/tests.pyc [new file with mode: 0644]
functional_tests/support/issue038/test$py.class [new file with mode: 0644]
functional_tests/support/issue038/test.py [new file with mode: 0644]
functional_tests/support/issue038/test.pyc [new file with mode: 0644]
functional_tests/support/issue072/test$py.class [new file with mode: 0644]
functional_tests/support/issue072/test.py [new file with mode: 0644]
functional_tests/support/issue072/test.pyc [new file with mode: 0644]
functional_tests/support/issue082/_mypackage/__init__.py [new file with mode: 0644]
functional_tests/support/issue082/_mypackage/_eggs.py [new file with mode: 0644]
functional_tests/support/issue082/_mypackage/bacon.py [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/__init__$py.class [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/__init__.py [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/__init__.pyc [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/_foo$py.class [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/_foo.py [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/_foo.pyc [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/bar$py.class [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/bar.py [new file with mode: 0644]
functional_tests/support/issue082/mypublicpackage/bar.pyc [new file with mode: 0644]
functional_tests/support/issue130/test$py.class [new file with mode: 0644]
functional_tests/support/issue130/test.py [new file with mode: 0644]
functional_tests/support/issue130/test.pyc [new file with mode: 0644]
functional_tests/support/issue143/not-a-package/__init__.py [new file with mode: 0644]
functional_tests/support/issue143/not-a-package/test.py [new file with mode: 0644]
functional_tests/support/issue191/UNKNOWN.egg-info/PKG-INFO [new file with mode: 0644]
functional_tests/support/issue191/UNKNOWN.egg-info/SOURCES.txt [new file with mode: 0644]
functional_tests/support/issue191/UNKNOWN.egg-info/dependency_links.txt [new file with mode: 0644]
functional_tests/support/issue191/UNKNOWN.egg-info/top_level.txt [new file with mode: 0644]
functional_tests/support/issue191/setup.cfg [new file with mode: 0644]
functional_tests/support/issue191/setup.py [new file with mode: 0644]
functional_tests/support/issue191/test$py.class [new file with mode: 0644]
functional_tests/support/issue191/test.py [new file with mode: 0644]
functional_tests/support/issue191/test.pyc [new file with mode: 0644]
functional_tests/support/issue269/test_bad_class$py.class [new file with mode: 0644]
functional_tests/support/issue269/test_bad_class.py [new file with mode: 0644]
functional_tests/support/issue269/test_bad_class.pyc [new file with mode: 0644]
functional_tests/support/issue279/test_mod_setup_fails$py.class [new file with mode: 0644]
functional_tests/support/issue279/test_mod_setup_fails.py [new file with mode: 0644]
functional_tests/support/issue279/test_mod_setup_fails.pyc [new file with mode: 0644]
functional_tests/support/issue408/nosetests.xml [new file with mode: 0644]
functional_tests/support/issue408/test$py.class [new file with mode: 0644]
functional_tests/support/issue408/test.py [new file with mode: 0644]
functional_tests/support/issue408/test.pyc [new file with mode: 0644]
functional_tests/support/ltfn/state$py.class [new file with mode: 0644]
functional_tests/support/ltfn/state.py [new file with mode: 0644]
functional_tests/support/ltfn/state.pyc [new file with mode: 0644]
functional_tests/support/ltfn/test_mod$py.class [new file with mode: 0644]
functional_tests/support/ltfn/test_mod.py [new file with mode: 0644]
functional_tests/support/ltfn/test_mod.pyc [new file with mode: 0644]
functional_tests/support/ltfn/test_pak1/__init__$py.class [new file with mode: 0644]
functional_tests/support/ltfn/test_pak1/__init__.py [new file with mode: 0644]
functional_tests/support/ltfn/test_pak1/__init__.pyc [new file with mode: 0644]
functional_tests/support/ltfn/test_pak1/test_mod$py.class [new file with mode: 0644]
functional_tests/support/ltfn/test_pak1/test_mod.py [new file with mode: 0644]
functional_tests/support/ltfn/test_pak1/test_mod.pyc [new file with mode: 0644]
functional_tests/support/ltfn/test_pak2/__init__$py.class [new file with mode: 0644]
functional_tests/support/ltfn/test_pak2/__init__.py [new file with mode: 0644]
functional_tests/support/ltfn/test_pak2/__init__.pyc [new file with mode: 0644]
functional_tests/support/ltftc/tests$py.class [new file with mode: 0644]
functional_tests/support/ltftc/tests.py [new file with mode: 0644]
functional_tests/support/ltftc/tests.pyc [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/__init__$py.class [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/__init__.py [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/__init__.pyc [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/example$py.class [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/example.py [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/example.pyc [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/test_pkg$py.class [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.py [new file with mode: 0644]
functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.pyc [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/__init__.py [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2$py.class [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.py [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.pyc [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2$py.class [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.py [new file with mode: 0644]
functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.pyc [new file with mode: 0644]
functional_tests/support/package1/example$py.class [new file with mode: 0644]
functional_tests/support/package1/example.py [new file with mode: 0644]
functional_tests/support/package1/example.pyc [new file with mode: 0644]
functional_tests/support/package1/tests/test_example_function$py.class [new file with mode: 0644]
functional_tests/support/package1/tests/test_example_function.py [new file with mode: 0644]
functional_tests/support/package1/tests/test_example_function.pyc [new file with mode: 0644]
functional_tests/support/package2/maths$py.class [new file with mode: 0644]
functional_tests/support/package2/maths.py [new file with mode: 0644]
functional_tests/support/package2/maths.pyc [new file with mode: 0644]
functional_tests/support/package2/test_pak/__init__$py.class [new file with mode: 0644]
functional_tests/support/package2/test_pak/__init__.py [new file with mode: 0644]
functional_tests/support/package2/test_pak/__init__.pyc [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_mod$py.class [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_mod.py [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_mod.pyc [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_sub/__init__$py.class [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_sub/__init__.py [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_sub/__init__.pyc [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_sub/test_mod$py.class [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_sub/test_mod.py [new file with mode: 0644]
functional_tests/support/package2/test_pak/test_sub/test_mod.pyc [new file with mode: 0644]
functional_tests/support/package3/lib/a$py.class [new file with mode: 0644]
functional_tests/support/package3/lib/a.py [new file with mode: 0644]
functional_tests/support/package3/lib/a.pyc [new file with mode: 0644]
functional_tests/support/package3/src/b$py.class [new file with mode: 0644]
functional_tests/support/package3/src/b.py [new file with mode: 0644]
functional_tests/support/package3/src/b.pyc [new file with mode: 0644]
functional_tests/support/package3/tests/test_a$py.class [new file with mode: 0644]
functional_tests/support/package3/tests/test_a.py [new file with mode: 0644]
functional_tests/support/package3/tests/test_a.pyc [new file with mode: 0644]
functional_tests/support/package3/tests/test_b$py.class [new file with mode: 0644]
functional_tests/support/package3/tests/test_b.py [new file with mode: 0644]
functional_tests/support/package3/tests/test_b.pyc [new file with mode: 0644]
functional_tests/support/pass/test$py.class [new file with mode: 0644]
functional_tests/support/pass/test.py [new file with mode: 0644]
functional_tests/support/pass/test.pyc [new file with mode: 0644]
functional_tests/support/test.cfg [new file with mode: 0644]
functional_tests/support/test_buggy_generators$py.class [new file with mode: 0644]
functional_tests/support/test_buggy_generators.py [new file with mode: 0644]
functional_tests/support/test_buggy_generators.pyc [new file with mode: 0644]
functional_tests/support/todo/test_with_todo$py.class [new file with mode: 0644]
functional_tests/support/todo/test_with_todo.py [new file with mode: 0644]
functional_tests/support/todo/test_with_todo.pyc [new file with mode: 0644]
functional_tests/support/todo/todoplug$py.class [new file with mode: 0644]
functional_tests/support/todo/todoplug.py [new file with mode: 0644]
functional_tests/support/todo/todoplug.pyc [new file with mode: 0644]
functional_tests/support/twist/test_twisted.py [new file with mode: 0644]
functional_tests/support/xunit.xml [new file with mode: 0644]
functional_tests/support/xunit/test_xunit_as_suite$py.class [new file with mode: 0644]
functional_tests/support/xunit/test_xunit_as_suite.py [new file with mode: 0644]
functional_tests/support/xunit/test_xunit_as_suite.pyc [new file with mode: 0644]
functional_tests/test_attribute_plugin$py.class [new file with mode: 0644]
functional_tests/test_attribute_plugin.py [new file with mode: 0644]
functional_tests/test_attribute_plugin.pyc [new file with mode: 0644]
functional_tests/test_buggy_generators$py.class [new file with mode: 0644]
functional_tests/test_buggy_generators.py [new file with mode: 0644]
functional_tests/test_buggy_generators.pyc [new file with mode: 0644]
functional_tests/test_cases$py.class [new file with mode: 0644]
functional_tests/test_cases.py [new file with mode: 0644]
functional_tests/test_cases.pyc [new file with mode: 0644]
functional_tests/test_collector$py.class [new file with mode: 0644]
functional_tests/test_collector.py [new file with mode: 0644]
functional_tests/test_collector.pyc [new file with mode: 0644]
functional_tests/test_commands$py.class [new file with mode: 0644]
functional_tests/test_commands.py [new file with mode: 0644]
functional_tests/test_commands.pyc [new file with mode: 0644]
functional_tests/test_config_files$py.class [new file with mode: 0644]
functional_tests/test_config_files.py [new file with mode: 0644]
functional_tests/test_config_files.pyc [new file with mode: 0644]
functional_tests/test_doctest_plugin$py.class [new file with mode: 0644]
functional_tests/test_doctest_plugin.py [new file with mode: 0644]
functional_tests/test_doctest_plugin.pyc [new file with mode: 0644]
functional_tests/test_entrypoints$py.class [new file with mode: 0644]
functional_tests/test_entrypoints.py [new file with mode: 0644]
functional_tests/test_entrypoints.pyc [new file with mode: 0644]
functional_tests/test_failuredetail_plugin$py.class [new file with mode: 0644]
functional_tests/test_failuredetail_plugin.py [new file with mode: 0644]
functional_tests/test_failuredetail_plugin.pyc [new file with mode: 0644]
functional_tests/test_generator_fixtures$py.class [new file with mode: 0644]
functional_tests/test_generator_fixtures.py [new file with mode: 0644]
functional_tests/test_generator_fixtures.pyc [new file with mode: 0644]
functional_tests/test_id_plugin$py.class [new file with mode: 0644]
functional_tests/test_id_plugin.py [new file with mode: 0644]
functional_tests/test_id_plugin.pyc [new file with mode: 0644]
functional_tests/test_importer$py.class [new file with mode: 0644]
functional_tests/test_importer.py [new file with mode: 0644]
functional_tests/test_importer.pyc [new file with mode: 0644]
functional_tests/test_isolate_plugin$py.class [new file with mode: 0644]
functional_tests/test_isolate_plugin.py [new file with mode: 0644]
functional_tests/test_isolate_plugin.pyc [new file with mode: 0644]
functional_tests/test_issue120/support/some_test$py.class [new file with mode: 0644]
functional_tests/test_issue120/support/some_test.py [new file with mode: 0644]
functional_tests/test_issue120/support/some_test.pyc [new file with mode: 0644]
functional_tests/test_issue120/test_named_test_with_doctest.rst [new file with mode: 0644]
functional_tests/test_issue_072$py.class [new file with mode: 0644]
functional_tests/test_issue_072.py [new file with mode: 0644]
functional_tests/test_issue_072.pyc [new file with mode: 0644]
functional_tests/test_issue_082$py.class [new file with mode: 0644]
functional_tests/test_issue_082.py [new file with mode: 0644]
functional_tests/test_issue_082.pyc [new file with mode: 0644]
functional_tests/test_issue_408$py.class [new file with mode: 0644]
functional_tests/test_issue_408.py [new file with mode: 0644]
functional_tests/test_issue_408.pyc [new file with mode: 0644]
functional_tests/test_load_tests_from_test_case$py.class [new file with mode: 0644]
functional_tests/test_load_tests_from_test_case.py [new file with mode: 0644]
functional_tests/test_load_tests_from_test_case.pyc [new file with mode: 0644]
functional_tests/test_loader$py.class [new file with mode: 0644]
functional_tests/test_loader.py [new file with mode: 0644]
functional_tests/test_loader.pyc [new file with mode: 0644]
functional_tests/test_multiprocessing/support/nameerror.py [new file with mode: 0644]
functional_tests/test_multiprocessing/support/nameerror.pyc [new file with mode: 0644]
functional_tests/test_multiprocessing/support/timeout.py [new file with mode: 0644]
functional_tests/test_multiprocessing/test_nameerror$py.class [new file with mode: 0644]
functional_tests/test_multiprocessing/test_nameerror.py [new file with mode: 0644]
functional_tests/test_multiprocessing/test_nameerror.pyc [new file with mode: 0644]
functional_tests/test_multiprocessing/test_process_timeout$py.class [new file with mode: 0644]
functional_tests/test_multiprocessing/test_process_timeout.py [new file with mode: 0644]
functional_tests/test_multiprocessing/test_process_timeout.pyc [new file with mode: 0644]
functional_tests/test_namespace_pkg$py.class [new file with mode: 0644]
functional_tests/test_namespace_pkg.py [new file with mode: 0644]
functional_tests/test_namespace_pkg.pyc [new file with mode: 0644]
functional_tests/test_plugin_api$py.class [new file with mode: 0644]
functional_tests/test_plugin_api.py [new file with mode: 0644]
functional_tests/test_plugin_api.pyc [new file with mode: 0644]
functional_tests/test_plugins$py.class [new file with mode: 0644]
functional_tests/test_plugins.py [new file with mode: 0644]
functional_tests/test_plugins.pyc [new file with mode: 0644]
functional_tests/test_plugintest$py.class [new file with mode: 0644]
functional_tests/test_plugintest.py [new file with mode: 0644]
functional_tests/test_plugintest.pyc [new file with mode: 0644]
functional_tests/test_program$py.class [new file with mode: 0644]
functional_tests/test_program.py [new file with mode: 0644]
functional_tests/test_program.pyc [new file with mode: 0644]
functional_tests/test_result$py.class [new file with mode: 0644]
functional_tests/test_result.py [new file with mode: 0644]
functional_tests/test_result.pyc [new file with mode: 0644]
functional_tests/test_selector$py.class [new file with mode: 0644]
functional_tests/test_selector.py [new file with mode: 0644]
functional_tests/test_selector.pyc [new file with mode: 0644]
functional_tests/test_skip_pdb_interaction$py.class [new file with mode: 0644]
functional_tests/test_skip_pdb_interaction.py [new file with mode: 0644]
functional_tests/test_skip_pdb_interaction.pyc [new file with mode: 0644]
functional_tests/test_success$py.class [new file with mode: 0644]
functional_tests/test_success.py [new file with mode: 0644]
functional_tests/test_success.pyc [new file with mode: 0644]
functional_tests/test_suite$py.class [new file with mode: 0644]
functional_tests/test_suite.py [new file with mode: 0644]
functional_tests/test_suite.pyc [new file with mode: 0644]
functional_tests/test_withid_failures.rst [new file with mode: 0644]
functional_tests/test_xunit$py.class [new file with mode: 0644]
functional_tests/test_xunit.py [new file with mode: 0644]
functional_tests/test_xunit.pyc [new file with mode: 0644]
install-rpm.sh [new file with mode: 0755]
lgpl.txt [new file with mode: 0644]
nose/__init__.py [new file with mode: 0644]
nose/case.py [new file with mode: 0644]
nose/commands.py [new file with mode: 0644]
nose/config.py [new file with mode: 0644]
nose/core.py [new file with mode: 0644]
nose/exc.py [new file with mode: 0644]
nose/ext/__init__.py [new file with mode: 0644]
nose/ext/dtcompat.py [new file with mode: 0644]
nose/failure.py [new file with mode: 0644]
nose/importer.py [new file with mode: 0644]
nose/inspector.py [new file with mode: 0644]
nose/loader.py [new file with mode: 0644]
nose/plugins/__init__.py [new file with mode: 0644]
nose/plugins/allmodules.py [new file with mode: 0644]
nose/plugins/attrib.py [new file with mode: 0644]
nose/plugins/base.py [new file with mode: 0644]
nose/plugins/builtin.py [new file with mode: 0644]
nose/plugins/capture.py [new file with mode: 0644]
nose/plugins/collect.py [new file with mode: 0644]
nose/plugins/cover.py [new file with mode: 0644]
nose/plugins/debug.py [new file with mode: 0644]
nose/plugins/deprecated.py [new file with mode: 0644]
nose/plugins/doctests.py [new file with mode: 0644]
nose/plugins/errorclass.py [new file with mode: 0644]
nose/plugins/failuredetail.py [new file with mode: 0644]
nose/plugins/isolate.py [new file with mode: 0644]
nose/plugins/logcapture.py [new file with mode: 0644]
nose/plugins/manager.py [new file with mode: 0644]
nose/plugins/multiprocess.py [new file with mode: 0644]
nose/plugins/plugintest.py [new file with mode: 0644]
nose/plugins/prof.py [new file with mode: 0644]
nose/plugins/skip.py [new file with mode: 0644]
nose/plugins/testid.py [new file with mode: 0644]
nose/plugins/xunit.py [new file with mode: 0644]
nose/proxy.py [new file with mode: 0644]
nose/pyversion.py [new file with mode: 0644]
nose/result.py [new file with mode: 0644]
nose/selector.py [new file with mode: 0644]
nose/sphinx/__init__.py [new file with mode: 0644]
nose/sphinx/pluginopts.py [new file with mode: 0644]
nose/suite.py [new file with mode: 0644]
nose/tools.py [new file with mode: 0644]
nose/twistedtools.py [new file with mode: 0644]
nose/usage.txt [new file with mode: 0644]
nose/util.py [new file with mode: 0644]
nosetests.1 [new file with mode: 0644]
patch.py [new file with mode: 0644]
selftest.py [new file with mode: 0755]
setup.cfg [new file with mode: 0644]
setup.py [new file with mode: 0644]
setup3lib.py [new file with mode: 0644]
unit_tests/helpers$py.class [new file with mode: 0644]
unit_tests/helpers.py [new file with mode: 0644]
unit_tests/helpers.pyc [new file with mode: 0644]
unit_tests/mock$py.class [new file with mode: 0644]
unit_tests/mock.py [new file with mode: 0644]
unit_tests/mock.pyc [new file with mode: 0644]
unit_tests/support/bug101/tests.py [new file with mode: 0644]
unit_tests/support/bug105/tests$py.class [new file with mode: 0644]
unit_tests/support/bug105/tests.py [new file with mode: 0644]
unit_tests/support/bug105/tests.pyc [new file with mode: 0644]
unit_tests/support/config_defaults/a.cfg [new file with mode: 0644]
unit_tests/support/config_defaults/b.cfg [new file with mode: 0644]
unit_tests/support/config_defaults/invalid.cfg [new file with mode: 0644]
unit_tests/support/config_defaults/invalid_value.cfg [new file with mode: 0644]
unit_tests/support/doctest/err_doctests$py.class [new file with mode: 0644]
unit_tests/support/doctest/err_doctests.py [new file with mode: 0644]
unit_tests/support/doctest/err_doctests.pyc [new file with mode: 0644]
unit_tests/support/doctest/no_doctests$py.class [new file with mode: 0644]
unit_tests/support/doctest/no_doctests.py [new file with mode: 0644]
unit_tests/support/doctest/no_doctests.pyc [new file with mode: 0644]
unit_tests/support/foo/__init__$py.class [new file with mode: 0644]
unit_tests/support/foo/__init__.py [new file with mode: 0644]
unit_tests/support/foo/__init__.pyc [new file with mode: 0644]
unit_tests/support/foo/bar/__init__$py.class [new file with mode: 0644]
unit_tests/support/foo/bar/__init__.py [new file with mode: 0644]
unit_tests/support/foo/bar/__init__.pyc [new file with mode: 0644]
unit_tests/support/foo/bar/buz$py.class [new file with mode: 0644]
unit_tests/support/foo/bar/buz.py [new file with mode: 0644]
unit_tests/support/foo/bar/buz.pyc [new file with mode: 0644]
unit_tests/support/foo/doctests.txt [new file with mode: 0644]
unit_tests/support/foo/test_foo.py [new file with mode: 0644]
unit_tests/support/foo/tests/dir_test_file.py [new file with mode: 0644]
unit_tests/support/issue006/tests$py.class [new file with mode: 0644]
unit_tests/support/issue006/tests.py [new file with mode: 0644]
unit_tests/support/issue006/tests.pyc [new file with mode: 0644]
unit_tests/support/issue065/tests$py.class [new file with mode: 0644]
unit_tests/support/issue065/tests.py [new file with mode: 0644]
unit_tests/support/issue065/tests.pyc [new file with mode: 0644]
unit_tests/support/issue270/__init__.py [new file with mode: 0644]
unit_tests/support/issue270/__init__.pyc [new file with mode: 0644]
unit_tests/support/issue270/foo_test.py [new file with mode: 0644]
unit_tests/support/issue270/foo_test.pyc [new file with mode: 0644]
unit_tests/support/other/file.txt [new file with mode: 0644]
unit_tests/support/pkgorg/lib/modernity.py [new file with mode: 0644]
unit_tests/support/pkgorg/tests/test_mod.py [new file with mode: 0644]
unit_tests/support/script.py [new file with mode: 0755]
unit_tests/support/test-dir/test.py [new file with mode: 0644]
unit_tests/support/test.py [new file with mode: 0644]
unit_tests/test_attribute_plugin$py.class [new file with mode: 0644]
unit_tests/test_attribute_plugin.py [new file with mode: 0644]
unit_tests/test_attribute_plugin.pyc [new file with mode: 0644]
unit_tests/test_bug105$py.class [new file with mode: 0644]
unit_tests/test_bug105.py [new file with mode: 0644]
unit_tests/test_bug105.pyc [new file with mode: 0644]
unit_tests/test_capture_plugin$py.class [new file with mode: 0644]
unit_tests/test_capture_plugin.py [new file with mode: 0644]
unit_tests/test_capture_plugin.pyc [new file with mode: 0644]
unit_tests/test_cases$py.class [new file with mode: 0644]
unit_tests/test_cases.py [new file with mode: 0644]
unit_tests/test_cases.pyc [new file with mode: 0644]
unit_tests/test_config$py.class [new file with mode: 0644]
unit_tests/test_config.py [new file with mode: 0644]
unit_tests/test_config.pyc [new file with mode: 0644]
unit_tests/test_config_defaults.rst [new file with mode: 0644]
unit_tests/test_core$py.class [new file with mode: 0644]
unit_tests/test_core.py [new file with mode: 0644]
unit_tests/test_core.pyc [new file with mode: 0644]
unit_tests/test_deprecated_plugin$py.class [new file with mode: 0644]
unit_tests/test_deprecated_plugin.py [new file with mode: 0644]
unit_tests/test_deprecated_plugin.pyc [new file with mode: 0644]
unit_tests/test_doctest_error_handling$py.class [new file with mode: 0644]
unit_tests/test_doctest_error_handling.py [new file with mode: 0644]
unit_tests/test_doctest_error_handling.pyc [new file with mode: 0644]
unit_tests/test_doctest_munging.rst [new file with mode: 0644]
unit_tests/test_id_plugin$py.class [new file with mode: 0644]
unit_tests/test_id_plugin.py [new file with mode: 0644]
unit_tests/test_id_plugin.pyc [new file with mode: 0644]
unit_tests/test_importer$py.class [new file with mode: 0644]
unit_tests/test_importer.py [new file with mode: 0644]
unit_tests/test_importer.pyc [new file with mode: 0644]
unit_tests/test_inspector$py.class [new file with mode: 0644]
unit_tests/test_inspector.py [new file with mode: 0644]
unit_tests/test_inspector.pyc [new file with mode: 0644]
unit_tests/test_isolation_plugin$py.class [new file with mode: 0644]
unit_tests/test_isolation_plugin.py [new file with mode: 0644]
unit_tests/test_isolation_plugin.pyc [new file with mode: 0644]
unit_tests/test_issue155.rst [new file with mode: 0644]
unit_tests/test_issue270.rst [new file with mode: 0644]
unit_tests/test_issue270_fixtures$py.class [new file with mode: 0644]
unit_tests/test_issue270_fixtures.py [new file with mode: 0644]
unit_tests/test_issue270_fixtures.pyc [new file with mode: 0644]
unit_tests/test_issue_006$py.class [new file with mode: 0644]
unit_tests/test_issue_006.py [new file with mode: 0644]
unit_tests/test_issue_006.pyc [new file with mode: 0644]
unit_tests/test_issue_064$py.class [new file with mode: 0644]
unit_tests/test_issue_064.py [new file with mode: 0644]
unit_tests/test_issue_064.pyc [new file with mode: 0644]
unit_tests/test_issue_065$py.class [new file with mode: 0644]
unit_tests/test_issue_065.py [new file with mode: 0644]
unit_tests/test_issue_065.pyc [new file with mode: 0644]
unit_tests/test_issue_100.rst [new file with mode: 0644]
unit_tests/test_issue_100.rst.py3.patch [new file with mode: 0644]
unit_tests/test_issue_101$py.class [new file with mode: 0644]
unit_tests/test_issue_101.py [new file with mode: 0644]
unit_tests/test_issue_101.pyc [new file with mode: 0644]
unit_tests/test_issue_159.rst [new file with mode: 0644]
unit_tests/test_issue_227$py.class [new file with mode: 0644]
unit_tests/test_issue_227.py [new file with mode: 0644]
unit_tests/test_issue_227.pyc [new file with mode: 0644]
unit_tests/test_issue_230$py.class [new file with mode: 0644]
unit_tests/test_issue_230.py [new file with mode: 0644]
unit_tests/test_issue_230.pyc [new file with mode: 0644]
unit_tests/test_lazy_suite$py.class [new file with mode: 0644]
unit_tests/test_lazy_suite.py [new file with mode: 0644]
unit_tests/test_lazy_suite.pyc [new file with mode: 0644]
unit_tests/test_loader$py.class [new file with mode: 0644]
unit_tests/test_loader.py [new file with mode: 0644]
unit_tests/test_loader.pyc [new file with mode: 0644]
unit_tests/test_logcapture_plugin$py.class [new file with mode: 0644]
unit_tests/test_logcapture_plugin.py [new file with mode: 0644]
unit_tests/test_logcapture_plugin.pyc [new file with mode: 0644]
unit_tests/test_logging$py.class [new file with mode: 0644]
unit_tests/test_logging.py [new file with mode: 0644]
unit_tests/test_logging.pyc [new file with mode: 0644]
unit_tests/test_ls_tree.rst [new file with mode: 0644]
unit_tests/test_multiprocess$py.class [new file with mode: 0644]
unit_tests/test_multiprocess.py [new file with mode: 0644]
unit_tests/test_multiprocess.pyc [new file with mode: 0644]
unit_tests/test_multiprocess_runner$py.class [new file with mode: 0644]
unit_tests/test_multiprocess_runner.py [new file with mode: 0644]
unit_tests/test_multiprocess_runner.pyc [new file with mode: 0644]
unit_tests/test_pdb_plugin$py.class [new file with mode: 0644]
unit_tests/test_pdb_plugin.py [new file with mode: 0644]
unit_tests/test_pdb_plugin.pyc [new file with mode: 0644]
unit_tests/test_plugin$py.class [new file with mode: 0644]
unit_tests/test_plugin.py [new file with mode: 0644]
unit_tests/test_plugin.pyc [new file with mode: 0644]
unit_tests/test_plugin_interfaces$py.class [new file with mode: 0644]
unit_tests/test_plugin_interfaces.py [new file with mode: 0644]
unit_tests/test_plugin_interfaces.pyc [new file with mode: 0644]
unit_tests/test_plugin_manager$py.class [new file with mode: 0644]
unit_tests/test_plugin_manager.py [new file with mode: 0644]
unit_tests/test_plugin_manager.pyc [new file with mode: 0644]
unit_tests/test_plugins$py.class [new file with mode: 0644]
unit_tests/test_plugins.py [new file with mode: 0644]
unit_tests/test_plugins.pyc [new file with mode: 0644]
unit_tests/test_result_proxy$py.class [new file with mode: 0644]
unit_tests/test_result_proxy.py [new file with mode: 0644]
unit_tests/test_result_proxy.pyc [new file with mode: 0644]
unit_tests/test_selector$py.class [new file with mode: 0644]
unit_tests/test_selector.py [new file with mode: 0644]
unit_tests/test_selector.pyc [new file with mode: 0644]
unit_tests/test_selector_plugins$py.class [new file with mode: 0644]
unit_tests/test_selector_plugins.py [new file with mode: 0644]
unit_tests/test_selector_plugins.pyc [new file with mode: 0644]
unit_tests/test_skip_plugin$py.class [new file with mode: 0644]
unit_tests/test_skip_plugin.py [new file with mode: 0644]
unit_tests/test_skip_plugin.pyc [new file with mode: 0644]
unit_tests/test_suite$py.class [new file with mode: 0644]
unit_tests/test_suite.py [new file with mode: 0644]
unit_tests/test_suite.pyc [new file with mode: 0644]
unit_tests/test_tools$py.class [new file with mode: 0644]
unit_tests/test_tools.py [new file with mode: 0644]
unit_tests/test_tools.pyc [new file with mode: 0644]
unit_tests/test_twisted$py.class [new file with mode: 0644]
unit_tests/test_twisted.py [new file with mode: 0644]
unit_tests/test_twisted.pyc [new file with mode: 0644]
unit_tests/test_twisted_testcase$py.class [new file with mode: 0644]
unit_tests/test_twisted_testcase.py [new file with mode: 0644]
unit_tests/test_twisted_testcase.pyc [new file with mode: 0644]
unit_tests/test_utils$py.class [new file with mode: 0644]
unit_tests/test_utils.py [new file with mode: 0644]
unit_tests/test_utils.pyc [new file with mode: 0644]
unit_tests/test_xunit$py.class [new file with mode: 0644]
unit_tests/test_xunit.py [new file with mode: 0644]
unit_tests/test_xunit.pyc [new file with mode: 0644]

diff --git a/AUTHORS b/AUTHORS
new file mode 100644 (file)
index 0000000..aa962ea
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,23 @@
+Jason Pellerin
+Kumar McMillan
+Mika Eloranta
+Jay Parlar
+Scot Doyle
+James Casbon
+Antoine Pitrou
+John J Lee
+Allen Bierbaum
+Pam Zerbinos
+Augie Fackler
+Peter Fein
+Kevin Mitchell
+Alex Stewart
+Timothee Peignier
+Thomas Kluyver
+Heng Liu
+Rosen Diankov
+Buck Golemon
+Bobby Impollonia
+Takafumi Arakaki
+Peter Bengtsson
+Gary Donovan
diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644 (file)
index 0000000..a736d3d
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,647 @@
+1.1.2
+
+- Fixed regression where the .coverage file was not saved (#439).
+  Patch by Timothée Peignier.
+
+1.1.1
+
+- Fixed missing nose.sphinx module in source distribution (#436).
+
+1.1.0
+
+- Revised multiprocessing implementation so that it works for test generators
+  (#399). Thanks to Rosen Diankov for the patch.
+- More fixes to multiprocessing implemented by Buck Golemon and Gary Donovan
+  (also part of #399).
+- Lots of improvements to the attrib plugin by Bobby Impollonia (#412, #411,
+  #324 and #381)
+- Code coverage plugin now uses native HTML generation when coverage 3 is
+  installed (#264). Thanks to Timothée Peignier for the patch.
+- Xunit plugin now shows test run time in fractions of a second (#317)
+- @attr (from nose.plugins.attrib) can now be used as a class decorator (#292)
+- Fixes Xunit plugin to handle non-UTF8 characters (#395)
+- Fixes Xunit plugin for reporting generator tests (#369)
+- Fixed problems with SkipTest in Python 3.2 (#389)
+- Fixed bug in doctest plugin under python 3. Thanks to Thomas Kluyver
+  for the patch. (#391)
+- Fixes mishandling of custom exceptions during failures (#405)
+- Fixed subtle bug in :option:`--first-package-wins` that made it
+  unpredictable (#293)
+- Fixes case where teardown_class() was called more than once (#408). Thanks
+  to Heng Liu for the patch.
+- Fixes coverage module names -- 'cal' used to also match calendar which was a
+  bug (#433)
+- Fixes capture plugin when exception message contains non-ascii chars (#402)
+- Fixed bug in tests for twisted tools. Thanks to Thomas Kluyver
+  for the patch.
+- Makes :option:`--plugins` more succinct when there are no options (#235)
+
+1.0.0
+
+- Made nose compatible with python 3. **Huge** thanks to Alex "foogod"
+  Stewart!
+
+0.11.4
+
+- Made nose compatible with Python 2.7.
+
+0.11.3
+
+- Fixed default plugin manager's use of plugin overriding. Thanks to
+  rob.daylife for the bug report and patch. (#323).
+
+0.11.2
+
+- Changed plugin loading so that external plugins loaded via extension
+  points can override builtin plugins with the same name.
+- Updated multiprocess plugin and nose's packaging to allow multiprocess
+  plugin to work on Windows (#265).
+- Fixed bug in xunit plugin's interaction with suites and errors in
+  module-level setup. Thanks to Mark McCague for the bug report (#279).
+- Fixed bug in nose.loader.TestLoader that allowed Test classes that raise
+  exceptions in __init__ to crash the test run (#269).
+- Fixed bugs in nose's test suite that caused spurious failures on Windows.
+- Fixed bug in twisted tools: delayed calls were not shut down on
+  reactor stop. Thanks to abbeyj for the patch (#278).
+- Fixed bug where root log handlers were not cleared. For example, this was
+  emitting unwanted messages when testing Google App Engine websites.
+- Fixed bug in test names output by xunit plugin. Thanks to Philip
+  Jenvey for the bug report and patch (#280).
+- Fixed bug in profile plugin that caused stats to fail to print under Python
+  2.5 and later. Thanks to djs at n-cube dot org for the bug report (#285).
+- Improved logcapture filtering, with default setting to filter out log
+  messages from nose itself. Thanks to gfxmonk for the patch (#277).
+- The xunit plugin now tags skipped tests with a <skipped> testcase tag, and
+  prevents the XML from containing invalid control characters.
+- Updated nose to be compatible with python 2.7 (#305).
+- Updated loading of usage document to allow nose to run from within
+  an egg archive (#288).
+- Fixed IronPython checks to make nose compatible with more versions
+  of IronPython. Thanks to Kevin Mitchell for the patch (#316).
+
+0.11.1
+
+- Fixed bug in xunit plugin xml escaping. Thanks to Nat Williams for the bug
+  report (#266).
+- Fixed bug in xunit plugin that could cause test run to crash after certain
+  types of errors or actions by other plugins.
+- Fixed bug in testid plugin that could cause test run to crash after certain
+  types of errors or actions by other plugins.
+- Fixed bug in collect only plugin that caused it to fail when collecting from
+  test generators.
+- Fixed some broken html in docs.
+
+0.11
+
+- **All new documentation!** nose's documentation is now generated by
+  Sphinx. And thanks to Pam Zerbinos, it is much better organized and easier
+  to use and read.
+- Two new core commandline options can help with testing namespace
+  packages. :option:`--first-package-wins` is useful when you want to test one
+  part of a namespace package that uses another part; in previous versions of
+  nose, the other part of the namespace package would be evicted from
+  sys.modules when the 2nd loaded. :option:`--traverse-namespace` is useful if
+  you want nose to discover tests across entries in a package's
+  __path__. (This was formerly the default behavior).
+- To make it easier to use custom plugins without needing setuptools,
+  :func:`nose.core.main` and :func:`nose.core.run` now support an
+  :doc:`addplugins <doc_tests/test_addplugins/test_addplugins>` keyword
+  argument that takes a list of additional plugins to make available. **Note**
+  that adding a plugin to this list **does not** activate or enable the
+  plugin, only makes it available to be enabled via command-line or
+  config file settings.
+- Thanks to Kevin Mitchell, nose is now more compatible with
+  IronPython. IronPython is still not fully supported, but it should work. If
+  you'd like to improve nose's support for IronPython further, please join the
+  nose developer's list and volunteer to become the IronPython maintainer for
+  nose!
+- Added multiprocess plugin that allows tests to be run in parallel
+  across multiple processes.
+- Added logcapture plugin that captures logging messages and prints them with
+  failing tests. Thanks to Max Ischenko for the implementation.
+- Added optional HTML coverage reports to coverage plugin. Thanks to Augie
+  Fackler for the patch.
+- Added plugin that enables collection of tests in all modules. Thanks to
+  Peter Fein for the patch (#137).
+- Added --failed option to testid plugin. When this option is in effect, if
+  any tests failed in the previous test run (so long as testid was active for
+  that test run) only the failed tests will run.
+- Made it possible to 'yield test' in addition to 'yield test,' from test
+  generators. Thanks to Chad Whitacre for the patch (#230).
+- Fixed bug that caused traceback inspector to fail when source code file
+  could not be found. Thanks to Philip Jenvey for the bug report and patch
+  (#236).
+- Fixed some issues limiting compatibility with IronPython. Thanks to Kevin
+  Mitchell for the patch.
+- Added support for module and test case fixtures in doctest files (#60).
+- Added --traverse-namespace commandline option that restores old default
+  behavior of following all package __path__ entries when loading tests from
+  packages. Thanks to Philip Jenvey for the patch (#167).
+- Added --first-package-wins commandline option to better support testing
+  parts of namespace packages. Thanks to Jason Coombs for the bug report
+  (#197).
+- Added versioned nosetests scripts (#123).
+- Fixed bug that would cause context teardown to fail to run in some
+  cases. Thanks to John Shaw for the bug report and patch (#234).
+- Enabled doctest plugin to use variable other than "_" as the default result
+  variable. Thanks to Matt Good for the patch (#163).
+- Fixed bug that would cause unicode output to crash output capture. Thanks to
+  schickb for the bug report (#227).
+- Added setUp and tearDown as valid names for module-level fixtures. Thanks to
+  AgilityNerd for the patch (#211).
+- Fixed bug in list of valid names for package-level fixtures. Thanks to
+  Philip Jenvey for the patch (#237).
+- Updated man page generation using hacked up manpage writer from
+  docutils sandbox. Thanks grubert@users.sourceforge.net for the
+  original module.
+
+0.10.4
+
+- nose is now compatible with python 2.6.
+
+0.10.3
+
+- Fixed bug in nosetests setup command that caused an exception to be raised
+  if run with options. Thanks to Philip Jenvey for the bug report (#191).
+- Raised score of coverage plugin to 200, so that it will execute before
+  default-score plugins, and so be able to catch more import-time code. Thanks
+  to Ned Batchelder for the bug report and patch (#190).
+
+0.10.2
+
+- nose now runs under jython (jython svn trunk only at this time). Thanks to
+  Philip Jenvey, Pam Zerbinos and the other pycon sprinters (#160).
+- Fixed bugs in loader, default plugin manager, and other modules that
+  caused plugin exceptions to be swallowed (#152, #155). Thanks to John J
+  Lee for the bug report and patch.
+- Added selftest.py script, used to test a non-installed distribution of
+  nose (#49). Thanks to Antoine Pitrou and John J Lee for the bug report and
+  patches.
+- Fixed bug in nose.importer that caused errors with namespace
+  packages. Thanks to Philip Jenvey for the bug report and patch (#164).
+- Fixed bug in nose.tools.with_setup that prevented use of multiple
+  @with_setup decorators. Thanks to tlesher for the bug report (#151).
+- Fixed bugs in handling of context fixtures for tests imported into a
+  package. Thanks to Gary Bernhardt for the bug report (#145).
+- Fixed bugs in handling of config files and config file options for plugins
+  excluded by a RestrictedPluginManager. Thanks to John J Lee and Philip
+  Jenvey for the bug reports and patches (#158, #166).
+- Updated ErrorClass exception reporting to be shorter and more clear. Thanks
+  to John J Lee for the patch (#142).
+- Allowed plugins to load tests from modules starting with '_'. Thanks to John
+  J Lee for the patch (#82).
+- Updated documentation about building as rpm (#127).
+- Updated config to make including executable files the default on
+  IronPython as well as on Windows. Thanks to sanxiyn for the bug
+  report and patch (#183).
+- Fixed a python 2.3 incompatibility in errorclass_failure.rst
+  (#173). Thanks to Philip Jenvey for the bug report and patch.
+- Classes with metaclasses can now be collected as tests (#153).
+- Made sure the document tree in the selector plugin test is accurate
+  and tested (#144). Thanks to John J Lee for the bug report and
+  patch.
+- Fixed stack level used when dropping into pdb in a doctest
+  (#154). Thanks to John J Lee for the bug report and patch.
+- Fixed bug in ErrorClassPlugin that made some missing keyword
+  argument errors obscure (#159). Thanks to Philip Jenvey for the bug
+  report and patch.
+
+0.10.1
+
+- Fixed bug in capture plugin that caused it to record captured output
+  on the test in the wrong attribute (#113).
+- Fixed bug in result proxy that caused tests to fail if they accessed
+  certain result attributes directly (#114). Thanks to Neilen Marais
+  for the bug report.
+- Fixed bug in capture plugin that caused other error formatters
+  changes to be lost if no output was captured (#124). Thanks to
+  someone at ilorentz.org for the bug report.
+- Fixed several bugs in the nosetests setup command that made some
+  options unusable and the command itself unusable when no options
+  were set (#125, #126, #128). Thanks to Alain Poirier for the bug
+  reports.
+- Fixed bug in handling of string errors (#130). Thanks to schl... at
+  uni-oldenburg.de for the bug report.
+- Fixed bug in coverage plugin option handling that prevented
+  --cover-package=mod1,mod2 from working (#117). Thanks to Allen
+  Bierbaum for the patch.
+- Fixed bug in profiler plugin that prevented output from being
+  produced when output capture was enabled on python 2.5
+  (#129). Thanks to James Casbon for the patch.
+- Fixed bug in adapting 0.9 plugins to 0.10 (#119 part one). Thanks to
+  John J Lee for the bug report and tests.
+- Fixed bug in handling of argv in config and plugin test utilities
+  (#119 part two). Thanks to John J Lee for the bug report and patch.
+- Fixed bug where Failure cases due to invalid test name
+  specifications were passed to plugins makeTest (#120). Thanks to
+  John J Lee for the bug report and patch.
+- Fixed bugs in doc css that mangled display in small windows. Thanks
+  to Ben Hoyt for the bug report and Michal Kwiatkowski for the fix.
+- Made it possible to pass a list or comma-separated string as
+  defaultTest to main(). Thanks to Allen Bierbaum for the suggestion
+  and patch.
+- Fixed a bug in nose.selector and nose.util.getpackage that caused
+  directories with names that are not legal python identifiers to be
+  collected as packages (#143). Thanks to John J Lee for the bug
+  report.
+
+0.10.0
+
+- Fixed bug that broke plugins with names containing underscores or
+  hyphens. Thanks to John J Lee for the bug report and patch (Issue
+  #81).
+- Fixed typo in nose.__all__. Thanks to John J Lee for the bug report.
+- Fixed handling of test descriptions that are multiline
+  docstrings. Thanks to James Casbon for the patch (Issue #50).
+- Improved documentation of doctest plugin to make it clear that
+  entities may have doctests, or themselves be tests, but not
+  both. Thanks to John J Lee for the bug report and patch (Issue #84).
+- Made __file__ available in non-python-module doctests.
+- Fixed bug that made it impossible for plugins to exclude package
+  directories from test discovery (Issue #89). Thanks to John J Lee
+  for the bug report and patch.
+- Fixed bug that swallowed TypeError and AttributeError exceptions
+  raised in some plugin calls (Issue #95). Thanks to John J Lee for
+  the bug report.
+- Fixed inconsistencies in many interfaces and docs. Thanks to John J
+  Lee for many bug reports.
+- Fixed bugs in rpm generation (Issue #96). Thanks to Mike Verdone for
+  the bug report and http://bugs.python.org/issue644744 for the fix.
+- Fixed inconsistent use of os.environ in plugin testing
+  utilities. Thanks to John J Lee for the bug report and patch (Issue
+  #97).
+- Fixed bug in test_address that prevented use of nose.case.Test in
+  doctests (Issue #100). Thanks to John J Lee for the bug report.
+- Fixed bug in error class plugin that caused string exceptions to be
+  masked (Issue #101). Thanks to depaula for the bug report.
+- Fixed bugs in tests and the profiler plugin when running under
+  Windows (Issue #103). Thanks to Sidnei Da Silva for the bug report.
+- Fixed bugs in plugin calls that caused exception swallowing (Issue
+  #107). Thanks to John L Lee for the bug report and patch.
+- Added more plugin example doctests. Thanks to Kumar McMillan and
+  John L Lee for patches and lots of help.
+- Changed default location of id file for TestId plugin from ~/.noseids to
+  .noseids in the configured working directory.
+
+0.10.0b1
+
+- Added support for a description attribute on tests in function and
+  method test cases.  Most useful for generators: set the description
+  attribute on the yielded function.
+- Fixed incorrect signature of addSuccess() method in
+  IPluginInterface. Thanks to nosexunit for the bug report. (Issue
+  #68).
+- Fixed isclass() function in nose.util so that it will not raise an
+  exception when examining instances that have no accessible __bases__
+  attribute. (Issue #65).
+- Fixed passing of tests to result; the test itself and not the
+  wrapper was being passed, resulting in test description plugin hooks
+  not being called. (Issue #71).
+- Fixed bugs in FailureDetail and Capture plugins, and plugin manager
+  and proxy uses of chainable plugin calls. Thanks to Ian Bicking for
+  the bug report (Issue #72).
+- Improved documentation.
+
+0.10.0a2
+
+- Changed entry point name to nose.plugins.0.10 -- dashes and other
+  non-word characters besides . are not allowed in entry point names.
+  (Issue #67)
+- Fixed loading of plugins from that entry point.
+- Fixed backwards-compatibility issue in nose.util (is_generator was
+  renamed isgenerator). (Issue #64)
+- Fixed bug in --logging-config option. Thanks to anton_kr... at yahoo
+  com for the bug report. (Issue #62)
+- Fixed bug in handling of --where argument: first --where was not
+  passed to loader as workingDir. Thanks to nosexunit for the bug
+  report. (Issue #63).
+
+0.10.0a1
+
+- Rewrote test loader to be more drop-in compatible with
+  unittest.TestLoader and to support a more user-friendly command
+  line.
+- Rewrote test runner and result classes to reduce duplication of effort.
+- Revised configuration system to be less convoluted.
+- Added nose.case.TestCase as universal wrapper for all
+  testcases. Plugins always see instances of this class.
+- Added a management layer to the plugin system, allowing for easy use
+  of different plugin loading schemes. The default manager loads
+  builtin plugins, 0.10 plugins under the setuptools entrypoint
+  nose.plugins.0-10 and provides limited support for legacy plugins
+  loaded under the entrypoint nose.plugins.
+- Added plugin hooks in all phases of discovery, running and description.
+- Converted several formerly core features to plugins: output capture,
+  assert introspection, pdb, and skipped and deprecated test support.
+- Added id plugin that allows for easier specification of tests on the
+  command line.
+- Added ErrorClassPlugin base class to allow for easier authoring of
+  plugins that handle errors, like the builtin skipped and deprecated
+  test plugins.
+- Added support for loading doctests from non-module files for all
+  supported python versions.
+- Added score property to plugins to allow plugins to execute in a
+  defined order (higher score execute first).
+- Expanded nose's own test suite to include a variety of functional tests.
+- Fixed many bugs.
+
+0.9.3
+
+- Added support for user configuration files. Thanks to Antoine Pitrou for the
+  patch.
+- Fixed bug that caused profiler plugin to leak 0-byte temp files. Thanks to
+  Antoine Pitrou for the patch.
+- Made usage of temp files in profiler plugin more sensible. Thanks to Michael
+  Sclenker for the bug report.
+- Fixed bug that stopped loading of twisted TestCase tests. Thanks to Kumar
+  McMillan for the bug report.
+- Corrected man page location. Thanks to luke macken for the bug report and
+  patch.
+- Added with_setup to nose.tools.__all__. Thanks to Allen Bierbaum for the bug
+  report.
+- Altered plugin loading so that builtin plugins can be loaded without
+  setuptools. Thanks to Allen Bierbaum for the suggestion.
+- Fixed a bug in the doctests plugin that caused an error when multiple
+  exclude arguments were specified. Thanks to mbeachy for the bug report and
+  patch.
+
+0.9.2
+
+- Added nosetests setuptools command. Now you can run python setup.py
+  nosetests and have access to all nose features and plugins. Thanks to James
+  Casbon for the patch.
+- Added make_decorator function to nose.tools. Used to construct decorator
+  functions that are well-behaved and preserve as much of the original
+  function's metadata as possible. Thanks to Antoine Pitrou for the patch.
+- Added nose.twistedtools, contributed by Antoine Pitrou. This module adds
+  @deferred decorator that makes it simple to write deferred tests, with or
+  without timeouts.
+- Added monkeypatch to doctests that keeps doctest from stepping on coverage
+  when the two plugins are used together. Thanks to David Avraamides for the
+  bug report.
+- Added isolation plugin. Use this plugin to automatically restore sys.modules
+  after each test module or package. Thanks to Michal Kwiatkowski for the
+  feature request.
+- Fixed bug where -vvvv turned off verbose logging instead of making it even
+  more verbose. Thanks to Ian Bicking for the bug report.
+- Fixed bug where assert inspection would fail when the trailing """ of a
+  docstring was one of the inspected lines. Thanks to cito at online dot de
+  for the bug report.
+- Updated attrib plugin to allow selection of test methods by attributes of
+  the test case class. Thanks to Jason Hildebrand for the patch.
+- Improved compatibility with python 2.2. Thanks to Chad Whitacre for the
+  patch.
+- Fixed bug in handling of options from setup.cfg. Thanks to Kumar McMillan for
+  the patch.
+- Fixed bug in generator methods, where a generator method using an inline
+  function would result in an AttributeError. Thanks to Antoine Pitrou for the
+  bug report.
+- Updated coverage plugin to ignore lines tagged with #pragma: no cover,
+  matching the behavior of coverage.py on the command line. Thanks to Bill
+  Zingler for the bug report.
+- Added a man page for nosetests. Thanks to Gustavo Noronha Silva for the
+  request and providing an example.
+
+0.9.1
+
+- New function nose.runmodule() finds and runs tests only in a
+  single module, which defaults to __main__ (like unittest.main() or
+  doctest.runmodule()). Thanks Greg Wilson for the suggestion.
+- Multiple -w (--where) arguments can now be used in one command line,
+  to find and run tests in multiple locations. Thanks Titus Brown for
+  the suggestion.
+- Multiple --include and --exclude arguments are now accepted in one command
+  line. Thanks Michal Kwiatkowski for the feature request.
+- Coverage will now include modules not imported by any test when
+  using the new --cover-inclusive switch. Thanks James Casbon for the
+  patch.
+- module:TestClass test selections now properly select all tests in the test
+  class.
+- startTest and stopTest are now called in plugins at the beginning and end of
+  test suites, including test modules, as well as individual tests. Thanks
+  Michal Kwiatkowski for the suggestion.
+- Fix bug in test selection when run as ``python setup.py test``: 'test' was
+  passing through and being used as the test name selection. Thanks Kumar
+  McMillan for the bug report.
+- Fix bug in handling of -x/--stop option where the test run would stop on
+  skipped or deprecated tests. Thanks Kumar McMillan for the bug report.
+- Fix bug in loading tests from projects with layouts that place modules in
+  /lib or /src dirs and tests in a parallel /tests dir.
+- Fix bug in python version detection. Thanks Kevin Dangoor for the bug report
+  and fix.
+- Fix log message in selector that could raise IndexError. Thanks Kumar
+  McMillan for the bug report and patch.
+- Fix bug in handling doctest extension arguments specified in environment and
+  on command line. Thanks Ian Bicking for the bug report.
+- Fix bug in running fixtures (setup/teardown) that are not functions, and
+  report a better error message when a fixture is not callable. Thanks Ian
+  Bicking for the bug report.
+
+0.9.0
+
+- More unit tests and better test coverage. Numerous bugfixes deriving from
+  same.
+- Make --exe option do what it says, and turn it on by default on
+  Windows. Add --noexe option so windows users can turn it off. Thanks
+  richard at artsalliancemedia dot com for the bug reports.
+- Handle a working directory that happens to be in the middle of a package
+  more gracefully. Thanks Max Ischenko for the bug report and test case.
+- Fix bugs in test name comparison when a test module is specified whose name
+  overlaps that of a non-test module. Thanks Max Ischenko for the bug report
+  and test case.
+- Fix warning spam when a non-existent test file is requested on the command
+  line. Thanks Max Ischenko for the bug report.
+
+0.9.0b2
+
+- Allow --debug to set any logger to DEBUG. Thanks to casbon at gmail dot com
+  for the patch.
+- Fix doctest help, which was missing notes about the environment variables
+  that it accepts. Thanks to Kumar McMillan for the patch.
+- Restore sys.stdout after run() in nose.core. Thanks to Titus Brown for the
+  bug report.
+- Correct handling of trailing comma in attrib plugin args. Thanks Titus Brown
+  for the patch.
+
+0.9.0b1
+
+- Fix bug in handling of OR conditions in attrib plugin. Thanks to Titus
+  Brown for the bug report.
+- Fix bug in nose.importer that would cause an attribute error when a local
+  module shadowed a builtin, or other object in sys.modules, without a
+  __file__ attribute. Thanks to casbon at gmail dot com for the bug report.
+- Fix bug in nose.tools decorators that would cause decorated tests to appear
+  with incorrect names in result output.
+
+0.9.0a2
+
+- In TestLoader, use inspect's isfunction() and ismethod() to filter functions
+  and methods, instead of callable(). Thanks to Kumar McMillan for reporting
+  the bug.
+- Fix doctest plugin: return an empty iterable when no tests are found in a
+  directory instead of None. Thanks to Kumar McMillan for the bug report and
+  patch.
+- Ignore executable python modules, unless run with --exe file. This is a
+  partial defense against nose causing trouble by loading python modules that
+  are not import-safe. The full defense: don't write modules that aren't
+  import safe!
+- Catch and warn about errors on plugin load instead of dying.
+- Renamed builtin profile module from nose.plugins.profile to
+  nose.plugins.prof to avoid shadowing stdlib profile.py module.
+
+0.9.0a1
+
+- Add support for plugins, with hooks for selecting, loading and reporting on
+  tests. Doctest and coverage are now plugins.
+- Add builtin plugins for profiling with hotshot, selecting tests by
+  attribute (contributed by Mika Eloranta), and warning of missed tests
+  specified on command line.
+- Change command line test selection syntax to match unittest. Thanks to Titus
+  Brown for the suggestion.
+- Option to drop into pdb on error or failure.
+- Option to stop running on first error or failure. Thanks to Kevin Dangoor
+  for the suggestion.
+- Support for doctests in files other than python modules (python 2.4 only)
+- Reimplement base test selection as single self-contained class.
+- Reimplement test loading as unittest-compatible TestLoader class.
+- Remove all monkeypatching.
+- Reimplement output capture and assert introspection support in
+  unittest-compatible Result class.
+- Better support for multiline constructions in assert introspection.
+- More context output with assert introspections.
+- Refactor setuptools test command support to use proxied result, which
+  enables output capture and assert introspection support without
+  monkeypatching. Thanks to Philip J. Eby for the suggestion and skeleton
+  implementation.
+- Add support for generators in test classes. Thanks to Jay Parlar for the
+  suggestion and patch.
+- Add nose.tools package with some helpful test-composition functions and
+  decorators, including @raises, contributed by Scot Doyle.
+- Reimplement nose.main (TestProgram) to have unittest-compatible signature.
+- All-new import path handling. You can even turn it off! (If you don't,
+  nose will ensure that all directories from which it imports anything are on
+  sys.path before the import.)
+- Logging package used for verbose logging.
+- Support for skipped and deprecated tests.
+- Configuration is no longer global.
+
+0.8.7
+
+- Add support for py.test-style test generators. Thanks to Jay Parlar for
+  the suggestion.
+- Fix bug in doctest discovery. Thanks to Richard Cooper for the bug report.
+- Fix bug in output capture being appended to later exceptions. Thanks to
+  Titus Brown for the patch that uncovered the bug.
+- Fix bug(?) in Exception patch that caused masked hasattr/__getattr__ loops
+  to either become actual infinite loops, or at least take so long to finally
+  error out that they might as well be infinite.
+- Add -m option to restrict test running to only tests in a particular package
+  or module. Like the -f option, -m does not restrict test *loading*, only
+  test *execution*.
+- When loading and running a test module, ensure that the module's path is in
+  sys.path for the duration of the run, not just while importing the module.
+- Add id() method to all callable test classes, for greater unittest
+  compatibility.
+
+0.8.6
+
+- Fix bug with coverage output when sys.modules contains entries without
+  __file__ attributes
+- Added -p (--cover-packages) switch that may be used to restrict coverage
+  report to modules in the indicated package(s)
+
+0.8.5
+
+- Output capture and verbose assertion errors now work when run like
+  'python setup.py test', as advertised.
+- Code coverage improvements: now coverage will be output for all modules
+  imported by any means that were not in sys.modules at the start of the test
+  run. By default, test modules will be excluded from the coverage report, but
+  you can include them with the -t (--cover-tests) option.
+
+0.8.4
+
+- Fix bugs in handling of setup/teardown fixtures that could cause TypeError
+  exceptions in fixtures to be silently ignored, or multiple fixtures of the
+  same type to run. Thanks to Titus Brown for the bug report.
+
+0.8.3
+
+- Add -V (--version) switch to nosetests
+- Fix bug where sys.path would not be set up correctly when running some
+  tests, producing spurious import errors (Thanks to Titus Brown and Mike
+  Thomson for the bug reports)
+- For test classes not derived from unittest.TestCase, output (module.Class)
+  "doc string" as test description, when method doc string is available
+  (Thanks to David Keeney for the suggestion, even if this isn't quite what he
+  meant)
+
+0.8.2
+
+- Revise import to bypass sys.path and manipulate sys.modules more
+  intelligently, ensuring that the test module we think we are loading is the
+  module we actually load, and that modules loaded by other imports are not
+  reloaded without cause
+- Allow test directories inside of packages. Formerly directories matching
+  testMatch but lacking an __init__.py would cause an ImportError when located
+  inside of packages
+- Fix bugs in different handling of -f switch in combination with -w and -o
+
+0.8.1
+
+- Fix bug in main() that resulted in incorrect exit status for nosetests
+  script when tests fail
+- Add missing test files to MANIFEST.in
+- Miscellaneous pylint cleanups
+
+0.8
+
+- Add doctest support
+- Add optional code coverage support, using Ned Batchelder's coverage.py;
+  activate with --coverage switch or NOSE_COVERAGE environment variable
+- More informative error message on import error
+- Fix bug where module setup could be called twice and teardown skipped
+  for certain setup method names.
+- main() returns success value, does not exit. run_exit() added to support
+  old behavior; nosetests script now calls nose.run_exit()
+
+0.7.5
+
+- Fix bus error on exit
+- Discover tests inside of non-TestCase classes that match testMatch
+- Reorganize selftest: now selftest tests the output of a full nose run
+- Add test_with_setup.py contributed by Kumar McMillan
+
+0.7.2
+
+- Refactor and correct bugs in discovery and test loading
+- Reorganize and expand documentation
+- Add -f (run this test file only) switch
+
+0.7.1
+
+- Bugfix release: test files in root of working directory were not being
+  stripped of file extension before import.
+
+0.7
+
+- Change license to LGPL
+- Major rework of output capture and assert introspection
+- Improve test discovery: now finds tests in packages
+- Replace -n switch ('no cwd') with -w switch ('look here')
+
+0.6
+
+- New nosetests script
+- Allow specification of names on command line that are loadable but not
+  directly loadable as modules (eg nosetests -o path/to/tests.py)
+- Add optional py.test-like assert introspection. Thanks to Kevin Dangoor
+  for the suggestion.
+- Improvements to selftest
+
+0.5.1
+
+- Increased compatibility with python 2.3 (and maybe earlier)
+- Increased compatibility with tests written for py.test: now calls
+  module.setup_module(module) if module.setup_module() fails
+
diff --git a/NEWS b/NEWS
new file mode 100644 (file)
index 0000000..6bee1ee
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,5 @@
+1.0!
+----
+
+nose version 1.0 adds support for python 3. The thanks of a grateful
+nation go out to Alex Stewart, aka foogod, for all of his great work.
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644 (file)
index 0000000..9e816f2
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.0
+Name: nose
+Version: 1.1.2
+Summary: nose extends unittest to make testing easier
+Home-page: http://readthedocs.org/docs/nose/
+Author: Jason Pellerin
+Author-email: jpellerin+nose@gmail.com
+License: GNU LGPL
+Description: nose extends the test loading and running features of unittest, making
+            it easier to write, find and run tests.
+        
+            By default, nose will run tests in files or directories under the current
+            working directory whose names include "test" or "Test" at a word boundary
+            (like "test_this" or "functional_test" or "TestClass" but not
+            "libtest"). Test output is similar to that of unittest, but also includes
+            captured stdout output from failing tests, for easy print-style debugging.
+        
+            These features, and many more, are customizable through the use of
+            plugins. Plugins included with nose provide support for doctest, code
+            coverage and profiling, flexible attribute-based test selection,
+            output capture and more. More information about writing plugins may be
+            found in the nose API documentation, here:
+            http://somethingaboutorange.com/mrl/projects/nose/
+        
+            If you have recently reported a bug marked as fixed, or have a craving for
+            the very latest, you may want the unstable development version instead:
+            http://bitbucket.org/jpellerin/nose/get/tip.gz#egg=nose-dev
+            
+Keywords: test unittest doctest automatic discovery
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Testing
diff --git a/README.txt b/README.txt
new file mode 100644 (file)
index 0000000..1b578da
--- /dev/null
@@ -0,0 +1,482 @@
+
+Basic usage
+***********
+
+Use the nosetests script (after installation by setuptools):
+
+   nosetests [options] [(optional) test files or directories]
+
+In addition to passing command-line options, you may also put
+configuration options in a .noserc or nose.cfg file in your home
+directory. These are standard .ini-style config files. Put your
+nosetests configuration in a [nosetests] section, with the -- prefix
+removed:
+
+   [nosetests]
+   verbosity=3
+   with-doctest=1
+
+There are several other ways to use the nose test runner besides the
+*nosetests* script. You may use nose in a test script:
+
+   import nose
+   nose.main()
+
+If you don't want the test script to exit with 0 on success and 1 on
+failure (like unittest.main), use nose.run() instead:
+
+   import nose
+   result = nose.run()
+
+*result* will be true if the test run succeeded, or false if any test
+failed or raised an uncaught exception. Lastly, you can run nose.core
+directly, which will run nose.main():
+
+   python /path/to/nose/core.py
+
+Please see the usage message for the nosetests script for information
+about how to control which tests nose runs, which plugins are loaded,
+and the test output.
+
+
+Extended usage
+==============
+
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression (by
+default: *(?:^|[b_.-])[Tt]est)* will be collected as a test (or source
+for collection of tests). In addition, all other packages found in the
+working directory will be examined for python source files or
+directories that match testMatch. Package discovery descends all the
+way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+
+Selecting Tests
+---------------
+
+To specify which tests to run, pass test names on the command line:
+
+   nosetests only_test_this.py
+
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+   nosetests test.module
+   nosetests another.test:TestCase.test_method
+   nosetests a.test:TestCase
+   nosetests /path/to/test/file.py:test_function
+
+You may also change the working directory where nose looks for tests
+by using the -w switch:
+
+   nosetests -w /path/to/tests
+
+Note, however, that support for multiple -w arguments is now
+deprecated and will be removed in a future release. As of nose 0.10,
+you can get the same behavior by specifying the target directories
+*without* the -w switch:
+
+   nosetests /path/to/tests /another/path/to/tests
+
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for the
+additional features (error classes, and plugin-supplied features such
+as output capture and assert introspection) detailed in the options
+below.
+
+
+Configuration
+-------------
+
+In addition to passing command-line options, you may also put
+configuration options in your project's *setup.cfg* file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+.ini-style config files, you put your nosetests configuration in a
+``[nosetests]`` section. Options are the same as on the command line,
+with the -- prefix removed. For options that are simple switches, you
+must supply a value:
+
+   [nosetests]
+   verbosity=3
+   with-doctest=1
+
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the ``-c`` option.
+
+
+Using Plugins
+-------------
+
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is
+installed, run:
+
+   nosetests --plugins
+
+You can add -v or -vv to that command to show more information about
+each plugin.
+
+If you are running nose.main() or nose.run() from a script, you can
+specify a list of plugins to use by passing a list of plugins with the
+plugins keyword argument.
+
+
+0.9 plugins
+-----------
+
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it's likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+
+0.10 and 0.11 plugins
+---------------------
+
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
+
+
+Options
+-------
+
+-V, --version
+
+   Output nose version and exit
+
+-p, --plugins
+
+   Output list of available plugins and exit. Combine with higher
+   verbosity for greater detail
+
+-v=DEFAULT, --verbose=DEFAULT
+
+   Be more verbose. [NOSE_VERBOSE]
+
+--verbosity=VERBOSITY
+
+   Set verbosity; --verbosity=2 is the same as -v
+
+-q=DEFAULT, --quiet=DEFAULT
+
+   Be less verbose
+
+-c=FILES, --config=FILES
+
+   Load configuration from config file(s). May be specified multiple
+   times; in that case, all config files will be loaded and combined
+
+-w=WHERE, --where=WHERE
+
+   Look for tests in this directory. May be specified multiple times.
+   The first directory passed will be used as the working directory,
+   in place of the current working directory, which is the default.
+   Others will be added to the list of tests to execute. [NOSE_WHERE]
+
+--py3where=PY3WHERE
+
+   Look for tests in this directory under Python 3.x. Functions the
+   same as 'where', but only applies if running under Python 3.x or
+   above.  Note that, if present under 3.x, this option completely
+   replaces any directories specified with 'where', so the 'where'
+   option becomes ineffective. [NOSE_PY3WHERE]
+
+-m=REGEX, --match=REGEX, --testmatch=REGEX
+
+   Files, directories, function names, and class names that match this
+   regular expression are considered tests.  Default:
+   (?:^|[b_./-])[Tt]est [NOSE_TESTMATCH]
+
+--tests=NAMES
+
+   Run these tests (comma-separated list). This argument is useful
+   mainly from configuration files; on the command line, just pass the
+   tests to run as additional arguments with no switch.
+
+-l=DEFAULT, --debug=DEFAULT
+
+   Activate debug logging for one or more systems. Available debug
+   loggers: nose, nose.importer, nose.inspector, nose.plugins,
+   nose.result and nose.selector. Separate multiple names with a
+   comma.
+
+--debug-log=FILE
+
+   Log debug messages to this file (default: sys.stderr)
+
+--logging-config=FILE, --log-config=FILE
+
+   Load logging config from this file -- bypasses all other logging
+   config settings.
+
+-I=REGEX, --ignore-files=REGEX
+
+   Completely ignore any file that matches this regular expression.
+   Takes precedence over any other settings or plugins. Specifying
+   this option will replace the default setting. Specify this option
+   multiple times to add more regular expressions [NOSE_IGNORE_FILES]
+
+-e=REGEX, --exclude=REGEX
+
+   Don't run tests that match regular expression [NOSE_EXCLUDE]
+
+-i=REGEX, --include=REGEX
+
+   This regular expression will be applied to files, directories,
+   function names, and class names for a chance to include additional
+   tests that do not match TESTMATCH.  Specify this option multiple
+   times to add more regular expressions [NOSE_INCLUDE]
+
+-x, --stop
+
+   Stop running tests after the first error or failure
+
+-P, --no-path-adjustment
+
+   Don't make any changes to sys.path when loading tests [NOSE_NOPATH]
+
+--exe
+
+   Look for tests in python modules that are executable. Normal
+   behavior is to exclude executable modules, since they may not be
+   import-safe [NOSE_INCLUDE_EXE]
+
+--noexe
+
+   DO NOT look for tests in python modules that are executable. (The
+   default on the windows platform is to do so.)
+
+--traverse-namespace
+
+   Traverse through all path entries of a namespace package
+
+--first-package-wins, --first-pkg-wins, --1st-pkg-wins
+
+   nose's importer will normally evict a package from sys.modules if
+   it sees a package with the same name in a different location. Set
+   this option to disable that behavior.
+
+-a=ATTR, --attr=ATTR
+
+   Run only tests that have attributes specified by ATTR [NOSE_ATTR]
+
+-A=EXPR, --eval-attr=EXPR
+
+   Run only tests for whose attributes the Python expression EXPR
+   evaluates to True [NOSE_EVAL_ATTR]
+
+-s, --nocapture
+
+   Don't capture stdout (any stdout output will be printed
+   immediately) [NOSE_NOCAPTURE]
+
+--nologcapture
+
+   Disable logging capture plugin. Logging configuration will be left
+   intact. [NOSE_NOLOGCAPTURE]
+
+--logging-format=FORMAT
+
+   Specify custom format to print statements. Uses the same format as
+   used by standard logging handlers. [NOSE_LOGFORMAT]
+
+--logging-datefmt=FORMAT
+
+   Specify custom date/time format to print statements. Uses the same
+   format as used by standard logging handlers. [NOSE_LOGDATEFMT]
+
+--logging-filter=FILTER
+
+   Specify which statements to filter in/out. By default, everything
+   is captured. If the output is too verbose, use this option to
+   filter out needless output. Example: filter=foo will capture
+   statements issued ONLY to foo or foo.what.ever.sub but not foobar
+   or other logger. Specify multiple loggers with comma:
+   filter=foo,bar,baz. If any logger name is prefixed with a minus, eg
+   filter=-foo, it will be excluded rather than included. Default:
+   exclude logging messages from nose itself (-nose). [NOSE_LOGFILTER]
+
+--logging-clear-handlers
+
+   Clear all other logging handlers
+
+--with-coverage
+
+   Enable plugin Coverage:  Activate a coverage report using Ned
+   Batchelder's coverage module.  [NOSE_WITH_COVERAGE]
+
+--cover-package=PACKAGE
+
+   Restrict coverage output to selected packages [NOSE_COVER_PACKAGE]
+
+--cover-erase
+
+   Erase previously collected coverage statistics before run
+
+--cover-tests
+
+   Include test modules in coverage report [NOSE_COVER_TESTS]
+
+--cover-inclusive
+
+   Include all python files under working directory in coverage
+   report.  Useful for discovering holes in test coverage if not all
+   files are imported by the test suite. [NOSE_COVER_INCLUSIVE]
+
+--cover-html
+
+   Produce HTML coverage information
+
+--cover-html-dir=DIR
+
+   Produce HTML coverage information in dir
+
+--pdb
+
+   Drop into debugger on errors
+
+--pdb-failures
+
+   Drop into debugger on failures
+
+--no-deprecated
+
+   Disable special handling of DeprecatedTest exceptions.
+
+--with-doctest
+
+   Enable plugin Doctest:  Activate doctest plugin to find and run
+   doctests in non-test modules.  [NOSE_WITH_DOCTEST]
+
+--doctest-tests
+
+   Also look for doctests in test modules. Note that classes, methods
+   and functions should have either doctests or non-doctest tests, not
+   both. [NOSE_DOCTEST_TESTS]
+
+--doctest-extension=EXT
+
+   Also look for doctests in files with this extension
+   [NOSE_DOCTEST_EXTENSION]
+
+--doctest-result-variable=VAR
+
+   Change the variable name set to the result of the last interpreter
+   command from the default '_'. Can be used to avoid conflicts with
+   the _() function used for text translation.
+   [NOSE_DOCTEST_RESULT_VAR]
+
+--doctest-fixtures=SUFFIX
+
+   Find fixtures for a doctest file in module with this name appended
+   to the base name of the doctest file
+
+--with-isolation
+
+   Enable plugin IsolationPlugin:  Activate the isolation plugin to
+   isolate changes to external modules to a single test module or
+   package. The isolation plugin resets the contents of sys.modules
+   after each test module or package runs to its state before the
+   test. PLEASE NOTE that this plugin should not be used with the
+   coverage plugin, or in any other case where module reloading may
+   produce undesirable side-effects.  [NOSE_WITH_ISOLATION]
+
+-d, --detailed-errors, --failure-detail
+
+   Add detail to error output by attempting to evaluate failed asserts
+   [NOSE_DETAILED_ERRORS]
+
+--with-profile
+
+   Enable plugin Profile:  Use this plugin to run tests using the
+   hotshot profiler.   [NOSE_WITH_PROFILE]
+
+--profile-sort=SORT
+
+   Set sort order for profiler output
+
+--profile-stats-file=FILE
+
+   Profiler stats file; default is a new temp file on each run
+
+--profile-restrict=RESTRICT
+
+   Restrict profiler output. See help for pstats.Stats for details
+
+--no-skip
+
+   Disable special handling of SkipTest exceptions.
+
+--with-id
+
+   Enable plugin TestId:  Activate to add a test id (like #1) to each
+   test name output. Activate with --failed to rerun failing tests
+   only.  [NOSE_WITH_ID]
+
+--id-file=FILE
+
+   Store test ids found in test runs in this file. Default is the file
+   .noseids in the working directory.
+
+--failed
+
+   Run the tests that failed in the last test run.
+
+--processes=NUM
+
+   Spread test run among this many processes. Set a number equal to
+   the number of processors or cores in your machine for best results.
+   [NOSE_PROCESSES]
+
+--process-timeout=SECONDS
+
+   Set timeout for return of results from each test runner process.
+   [NOSE_PROCESS_TIMEOUT]
+
+--process-restartworker
+
+   If set, will restart each worker process once their tests are done,
+   this helps control memory leaks from killing the system.
+   [NOSE_PROCESS_RESTARTWORKER]
+
+--with-xunit
+
+   Enable plugin Xunit: This plugin provides test results in the
+   standard XUnit XML format. [NOSE_WITH_XUNIT]
+
+--xunit-file=FILE
+
+   Path to xml file to store the xunit report in. Default is
+   nosetests.xml in the working directory [NOSE_XUNIT_FILE]
+
+--all-modules
+
+   Enable plugin AllModules: Collect tests from all python modules.
+   [NOSE_ALL_MODULES]
+
+--collect-only
+
+   Enable collect-only:  Collect and output test names only, don't run
+   any tests.  [COLLECT_ONLY]
diff --git a/bin/nosetests b/bin/nosetests
new file mode 100755 (executable)
index 0000000..36e0ee9
--- /dev/null
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+from nose import main
+
+if __name__ == '__main__':
+    main()
diff --git a/distribute_setup.py b/distribute_setup.py
new file mode 100644 (file)
index 0000000..3ea2e66
--- /dev/null
@@ -0,0 +1,485 @@
+#!python
+"""Bootstrap distribute installation
+
+If you want to use setuptools in your package's setup.py, just include this
+file in the same directory with it, and add this to the top of your setup.py::
+
+    from distribute_setup import use_setuptools
+    use_setuptools()
+
+If you want to require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, you can do so by supplying
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+import os
+import sys
+import time
+import fnmatch
+import tempfile
+import tarfile
+from distutils import log
+
+try:
+    from site import USER_SITE
+except ImportError:
+    USER_SITE = None
+
+try:
+    import subprocess
+
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        return subprocess.call(args) == 0
+
+except ImportError:
+    # will be used for python 2.3
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        # quoting arguments if windows
+        if sys.platform == 'win32':
+            def quote(arg):
+                if ' ' in arg:
+                    return '"%s"' % arg
+                return arg
+            args = [quote(arg) for arg in args]
+        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
+
+DEFAULT_VERSION = "0.6.14"
+DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
+SETUPTOOLS_FAKED_VERSION = "0.6c11"
+
+SETUPTOOLS_PKG_INFO = """\
+Metadata-Version: 1.0
+Name: setuptools
+Version: %s
+Summary: xxxx
+Home-page: xxx
+Author: xxx
+Author-email: xxx
+License: xxx
+Description: xxx
+""" % SETUPTOOLS_FAKED_VERSION
+
+
+def _install(tarball):
+    # extracting the tarball
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        tar = tarfile.open(tarball)
+        _extractall(tar)
+        tar.close()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+
+        # installing
+        log.warn('Installing Distribute')
+        if not _python_cmd('setup.py', 'install'):
+            log.warn('Something went wrong during the installation.')
+            log.warn('See the error message above.')
+    finally:
+        os.chdir(old_wd)
+
+
+def _build_egg(egg, tarball, to_dir):
+    # extracting the tarball
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        tar = tarfile.open(tarball)
+        _extractall(tar)
+        tar.close()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+
+        # building an egg
+        log.warn('Building a Distribute egg in %s', to_dir)
+        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+    finally:
+        os.chdir(old_wd)
+    # returning the result
+    log.warn(egg)
+    if not os.path.exists(egg):
+        raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
+                       % (version, sys.version_info[0], sys.version_info[1]))
+    if not os.path.exists(egg):
+        tarball = download_setuptools(version, download_base,
+                                      to_dir, download_delay)
+        _build_egg(egg, tarball, to_dir)
+    sys.path.insert(0, egg)
+    import setuptools
+    setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+                   to_dir=os.curdir, download_delay=15, no_fake=True):
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    was_imported = 'pkg_resources' in sys.modules or \
+        'setuptools' in sys.modules
+    try:
+        try:
+            import pkg_resources
+            if not hasattr(pkg_resources, '_distribute'):
+                if not no_fake:
+                    _fake_setuptools()
+                raise ImportError
+        except ImportError:
+            return _do_download(version, download_base, to_dir, download_delay)
+        try:
+            pkg_resources.require("distribute>="+version)
+            return
+        except pkg_resources.VersionConflict:
+            e = sys.exc_info()[1]
+            if was_imported:
+                sys.stderr.write(
+                "The required version of distribute (>=%s) is not available,\n"
+                "and can't be installed while this script is running. Please\n"
+                "install a more recent version first, using\n"
+                "'easy_install -U distribute'."
+                "\n\n(Currently using %r)\n" % (version, e.args[0]))
+                sys.exit(2)
+            else:
+                del pkg_resources, sys.modules['pkg_resources']    # reload ok
+                return _do_download(version, download_base, to_dir,
+                                    download_delay)
+        except pkg_resources.DistributionNotFound:
+            return _do_download(version, download_base, to_dir,
+                                download_delay)
+    finally:
+        if not no_fake:
+            _create_fake_setuptools_pkg_info(to_dir)
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+                        to_dir=os.curdir, delay=15):
+    """Download distribute from a specified location and return its filename
+
+    `version` should be a valid distribute version number that is available
+    as an egg for download under the `download_base` URL (which should end
+    with a '/'). `to_dir` is the directory where the egg will be downloaded.
+    `delay` is the number of seconds to pause before an actual download
+    attempt.
+    """
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    try:
+        from urllib.request import urlopen
+    except ImportError:
+        from urllib2 import urlopen
+    tgz_name = "distribute-%s.tar.gz" % version
+    url = download_base + tgz_name
+    saveto = os.path.join(to_dir, tgz_name)
+    src = dst = None
+    if not os.path.exists(saveto):  # Avoid repeated downloads
+        try:
+            log.warn("Downloading %s", url)
+            src = urlopen(url)
+            # Read/write all in one block, so we don't create a corrupt file
+            # if the download is interrupted.
+            data = src.read()
+            dst = open(saveto, "wb")
+            dst.write(data)
+        finally:
+            if src:
+                src.close()
+            if dst:
+                dst.close()
+    return os.path.realpath(saveto)
+
+def _no_sandbox(function):
+    def __no_sandbox(*args, **kw):
+        try:
+            from setuptools.sandbox import DirectorySandbox
+            if not hasattr(DirectorySandbox, '_old'):
+                def violation(*args):
+                    pass
+                DirectorySandbox._old = DirectorySandbox._violation
+                DirectorySandbox._violation = violation
+                patched = True
+            else:
+                patched = False
+        except ImportError:
+            patched = False
+
+        try:
+            return function(*args, **kw)
+        finally:
+            if patched:
+                DirectorySandbox._violation = DirectorySandbox._old
+                del DirectorySandbox._old
+
+    return __no_sandbox
+
+def _patch_file(path, content):
+    """Will backup the file then patch it"""
+    existing_content = open(path).read()
+    if existing_content == content:
+        # already patched
+        log.warn('Already patched.')
+        return False
+    log.warn('Patching...')
+    _rename_path(path)
+    f = open(path, 'w')
+    try:
+        f.write(content)
+    finally:
+        f.close()
+    return True
+
+_patch_file = _no_sandbox(_patch_file)
+
+def _same_content(path, content):
+    return open(path).read() == content
+
+def _rename_path(path):
+    new_name = path + '.OLD.%s' % time.time()
+    log.warn('Renaming %s into %s', path, new_name)
+    os.rename(path, new_name)
+    return new_name
+
+def _remove_flat_installation(placeholder):
+    if not os.path.isdir(placeholder):
+        log.warn('Unkown installation at %s', placeholder)
+        return False
+    found = False
+    for file in os.listdir(placeholder):
+        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
+            found = True
+            break
+    if not found:
+        log.warn('Could not locate setuptools*.egg-info')
+        return
+
+    log.warn('Removing elements out of the way...')
+    pkg_info = os.path.join(placeholder, file)
+    if os.path.isdir(pkg_info):
+        patched = _patch_egg_dir(pkg_info)
+    else:
+        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
+
+    if not patched:
+        log.warn('%s already patched.', pkg_info)
+        return False
+    # now let's move the files out of the way
+    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
+        element = os.path.join(placeholder, element)
+        if os.path.exists(element):
+            _rename_path(element)
+        else:
+            log.warn('Could not find the %s element of the '
+                     'Setuptools distribution', element)
+    return True
+
+_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
+def _after_install(dist):
+    log.warn('After install bootstrap.')
+    placeholder = dist.get_command_obj('install').install_purelib
+    _create_fake_setuptools_pkg_info(placeholder)
+
+def _create_fake_setuptools_pkg_info(placeholder):
+    if not placeholder or not os.path.exists(placeholder):
+        log.warn('Could not find the install location')
+        return
+    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
+    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
+            (SETUPTOOLS_FAKED_VERSION, pyver)
+    pkg_info = os.path.join(placeholder, setuptools_file)
+    if os.path.exists(pkg_info):
+        log.warn('%s already exists', pkg_info)
+        return
+
+    log.warn('Creating %s', pkg_info)
+    f = open(pkg_info, 'w')
+    try:
+        f.write(SETUPTOOLS_PKG_INFO)
+    finally:
+        f.close()
+
+    pth_file = os.path.join(placeholder, 'setuptools.pth')
+    log.warn('Creating %s', pth_file)
+    f = open(pth_file, 'w')
+    try:
+        f.write(os.path.join(os.curdir, setuptools_file))
+    finally:
+        f.close()
+
+_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+
+def _patch_egg_dir(path):
+    # let's check if it's already patched
+    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+    if os.path.exists(pkg_info):
+        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
+            log.warn('%s already patched.', pkg_info)
+            return False
+    _rename_path(path)
+    os.mkdir(path)
+    os.mkdir(os.path.join(path, 'EGG-INFO'))
+    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+    f = open(pkg_info, 'w')
+    try:
+        f.write(SETUPTOOLS_PKG_INFO)
+    finally:
+        f.close()
+    return True
+
+_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
+def _before_install():
+    log.warn('Before install bootstrap.')
+    _fake_setuptools()
+
+
+def _under_prefix(location):
+    if 'install' not in sys.argv:
+        return True
+    args = sys.argv[sys.argv.index('install')+1:]
+    for index, arg in enumerate(args):
+        for option in ('--root', '--prefix'):
+            if arg.startswith('%s=' % option):
+                top_dir = arg.split('root=')[-1]
+                return location.startswith(top_dir)
+            elif arg == option:
+                if len(args) > index:
+                    top_dir = args[index+1]
+                    return location.startswith(top_dir)
+        if arg == '--user' and USER_SITE is not None:
+            return location.startswith(USER_SITE)
+    return True
+
+
+def _fake_setuptools():
+    log.warn('Scanning installed packages')
+    try:
+        import pkg_resources
+    except ImportError:
+        # we're cool
+        log.warn('Setuptools or Distribute does not seem to be installed.')
+        return
+    ws = pkg_resources.working_set
+    try:
+        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
+                                  replacement=False))
+    except TypeError:
+        # old distribute API
+        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+
+    if setuptools_dist is None:
+        log.warn('No setuptools distribution found')
+        return
+    # detecting if it was already faked
+    setuptools_location = setuptools_dist.location
+    log.warn('Setuptools installation detected at %s', setuptools_location)
+
+    # if --root or --preix was provided, and if
+    # setuptools is not located in them, we don't patch it
+    if not _under_prefix(setuptools_location):
+        log.warn('Not patching, --root or --prefix is installing Distribute'
+                 ' in another location')
+        return
+
+    # let's see if its an egg
+    if not setuptools_location.endswith('.egg'):
+        log.warn('Non-egg installation')
+        res = _remove_flat_installation(setuptools_location)
+        if not res:
+            return
+    else:
+        log.warn('Egg installation')
+        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
+        if (os.path.exists(pkg_info) and
+            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+            log.warn('Already patched.')
+            return
+        log.warn('Patching...')
+        # let's create a fake egg replacing setuptools one
+        res = _patch_egg_dir(setuptools_location)
+        if not res:
+            return
+    log.warn('Patched done.')
+    _relaunch()
+
+
+def _relaunch():
+    log.warn('Relaunching...')
+    # we have to relaunch the process
+    # pip marker to avoid a relaunch bug
+    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+        sys.argv[0] = 'setup.py'
+    args = [sys.executable] + sys.argv
+    sys.exit(subprocess.call(args))
+
+
+def _extractall(self, path=".", members=None):
+    """Extract all members from the archive to the current working
+       directory and set owner, modification time and permissions on
+       directories afterwards. `path' specifies a different directory
+       to extract to. `members' is optional and must be a subset of the
+       list returned by getmembers().
+    """
+    import copy
+    import operator
+    from tarfile import ExtractError
+    directories = []
+
+    if members is None:
+        members = self
+
+    for tarinfo in members:
+        if tarinfo.isdir():
+            # Extract directories with a safe mode.
+            directories.append(tarinfo)
+            tarinfo = copy.copy(tarinfo)
+            tarinfo.mode = 448 # decimal for oct 0700
+        self.extract(tarinfo, path)
+
+    # Reverse sort directories.
+    if sys.version_info < (2, 4):
+        def sorter(dir1, dir2):
+            return cmp(dir1.name, dir2.name)
+        directories.sort(sorter)
+        directories.reverse()
+    else:
+        directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+    # Set correct owner, mtime and filemode on directories.
+    for tarinfo in directories:
+        dirpath = os.path.join(path, tarinfo.name)
+        try:
+            self.chown(tarinfo, dirpath)
+            self.utime(tarinfo, dirpath)
+            self.chmod(tarinfo, dirpath)
+        except ExtractError:
+            e = sys.exc_info()[1]
+            if self.errorlevel > 1:
+                raise
+            else:
+                self._dbg(1, "tarfile: %s" % e)
+
+
+def main(argv, version=DEFAULT_VERSION):
+    """Install or upgrade setuptools and EasyInstall"""
+    tarball = download_setuptools()
+    _install(tarball)
+
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
diff --git a/doc/.static/nose.css b/doc/.static/nose.css
new file mode 100644 (file)
index 0000000..38602e2
--- /dev/null
@@ -0,0 +1,74 @@
+@import url(default.css);
+
+body {
+    padding-left: 20px;
+    background-color: #fff;
+    font: x-small Georgia,Serif;
+    font-size/* */:/**/small;
+    font-size: /**/small;
+}
+
+div.body { border-right: 1px solid #ccc; }
+
+div.body h1 { margin-top: 0; font-size: 130%; margin-right: 0px; }
+div.body h2 { font-size: 120%; }
+div.body h3 { font-size: 115%; }
+div.body h4 { font-size: 110%; }
+div.body h5 { font-size: 106%; }
+div.body h6 { font-size: 103%; }
+
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+    border: none;
+    background-color: #fff;
+}
+
+.new {
+       color: #f00;
+       font-weight: bold;
+}
+
+pre, tt {
+    background-color: #ffe;
+}
+
+div.body h1.big {
+    font-family: courier, "courier new", monospace;
+    font-weight: bold;
+    font-size: 800%;
+    background-color: #fff;
+    margin: 0px -20px 0px -25px;
+    padding: 0px 0px 0px 10px;
+    border: none;
+    color: #000;
+}
+
+div.body h2.big {
+    font-weight: bold;
+    margin: -10px -20px 0px -20px;
+}
+
+p.big {
+    font-family: 'Trebuchet MS', sans-serif;
+    font-weight: bold;
+    font-size: 200%;
+    margin-left: -20px;
+    padding-left: 10px;
+}
+
+span.biglink {
+    font-size: 1.3em
+}
+
+table.contentstable td {
+    vertical-align: top;
+    padding-left: 0px;
+    padding-right: 10px;
+}
+
+table.contentstable td p {
+    text-align: left;
+}
\ No newline at end of file
diff --git a/doc/.templates/index.html b/doc/.templates/index.html
new file mode 100644 (file)
index 0000000..affb29e
--- /dev/null
@@ -0,0 +1,52 @@
+<h1 class="big">nose</h1>
+<h2 class="big">is nicer testing for python</h2>
+
+<p class="big">nose extends unittest to make testing easier.</p>
+
+{{ body }}
+
+<h1>Documentation</h1>
+
+<table class="contentstable">
+  <tr>
+    <td width="50%">
+      <a class="biglink" href="{{ pathto("testing") }}">
+        Testing with nose</a>
+      <p>Find out how to write, find and run tests using nose.<br />
+      <a href="{{ pathto("testing") }}">More &gt;</a></p>
+    </td>
+    <td width="50%">
+      <a class="biglink" href="{{ pathto("developing") }}">
+        Developing with nose</a>
+      <p>Find out how to write your own plugins, and about nose
+        internals.<br/> <a href="{{ pathto("developing") }}">More &gt;</a></p>
+    </td>
+  </tr>
+  <tr>
+    <td>
+      <a class="biglink" href="{{ pathto("news") }}">
+        News</a>
+      <p>What's new in this release?<br/>
+        <a href="{{ pathto("news") }}">More &gt;</a></p>
+    </td>
+    <td>
+      <a class="biglink" href="{{ pathto("further_reading") }}">
+        Further reading</a>
+      <p>Plugin recipes and usage examples, trivia and other
+        uncategorizable items.<br/>
+        <a href="{{ pathto("further_reading") }}">More &gt;</a></p>
+    </td>
+  </tr>
+  <tr>
+    <td>
+      <span class="biglink">Indices and tables</span>
+      <ul>
+        <li><a href="{{ pathto("genindex") }}">
+            Complete index</a></li>
+        <li><a href="{{ pathto("modindex") }}">
+            Index of documented modules</a></li>
+      </ul>
+    </td>
+    <td></td>
+  </tr>
+</table>
diff --git a/doc/.templates/indexsidebar.html b/doc/.templates/indexsidebar.html
new file mode 100644 (file)
index 0000000..cc1aea8
--- /dev/null
@@ -0,0 +1,56 @@
+<h3><a href="http://pypi.python.org/pypi/nose/">Download</a></h3>
+<ul>
+  <li><a href="http://pypi.python.org/pypi/nose/">
+      Current version: {{ release }}</a>
+  </li>
+</ul>
+
+<h3>Install</h3>
+<ul>
+  <li>This release:<br/>
+    <tt>easy_install nose=={{ release }}</tt></li>
+  <li>Development (unstable):<br/>
+    <tt>easy_install nose==dev</tt>
+  </li>
+</ul>
+
+<h3>Community</h3>
+<ul>
+  <li><a href="http://groups.google.com/group/nose-announce">
+    Announcement list</a>
+    <ul><li>Sign up to receive email announcements
+        of new releases</li></ul>
+  </li>
+  <li><a href="http://groups.google.com/group/nose-users">
+    Users' discussion list</a>
+    <ul><li>Talk about using nose. Get help. Give help!</li></ul>
+  </li>
+  <li><a href="http://lists.idyll.org/listinfo/testing-in-python">
+      TIP list</a>
+    <ul><li>The Testing In Python list features wide-ranging
+        discussions of all topics of interest to python
+        testers.</li></ul>
+  </li>
+</ul>
+
+<h3><a href="http://code.google.com/p/python-nose/">Tracker</a></h3>
+<ul><li>Report bugs, request features, wik the wiki, browse source.</li></ul>
+
+<h3>Other links</h3>
+<ul>
+  <li>
+    <a href="http://codespeak.net/py/current/doc/test.html">py.test</a>
+  </li>
+  <li>
+    <a href="http://peak.telecommunity.com/DevCenter/setuptools">
+      setuptools</a>
+  </li>
+</ul>
+
+<h3>Older versions</h3>
+<ul>
+  <li><a href="../0.10.4/">nose 0.10, the previous major
+      release.</a></li>
+  <li><a href="../0.9.3/">nose 0.9.3, a very old version that you
+      shouldn't use.</a></li>
+</ul>
diff --git a/doc/.templates/layout.html b/doc/.templates/layout.html
new file mode 100644 (file)
index 0000000..e49c1e9
--- /dev/null
@@ -0,0 +1,16 @@
+{% extends "!layout.html" %}
+
+{%- block relbar1 %}
+{% if pagename != 'index' %}{{ super() }}{% endif %}
+{% endblock %}
+
+{%- block footer %}
+{{ super() }}
+<script src="http://www.google-analytics.com/urchin.js" 
+        type="text/javascript">
+</script>
+<script type="text/javascript">
+  _uacct = "UA-2236166-1";
+  urchinTracker();
+</script>
+{%- endblock %}
diff --git a/doc/.templates/page.html b/doc/.templates/page.html
new file mode 100644 (file)
index 0000000..3416437
--- /dev/null
@@ -0,0 +1,7 @@
+{% extends "!page.html" %}
+{% block body %}
+{% if pagename == 'index' %}
+{% include "index.html" %}
+{% else %}
+{{ super() }}
+{% endif %}{% endblock %}
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644 (file)
index 0000000..2a1d23b
--- /dev/null
@@ -0,0 +1,89 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d .build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html web pickle htmlhelp latex changes linkcheck man readme
+
+help:
+       @echo "Please use \`make <target>' where <target> is one of"
+       @echo "  html      to make standalone HTML files"
+       @echo "  pickle    to make pickle files"
+       @echo "  json      to make JSON files"
+       @echo "  htmlhelp  to make HTML files and a HTML help project"
+       @echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+       @echo "  changes   to make an overview over all changed/added/deprecated items"
+       @echo "  linkcheck to check all external links for integrity"
+
+clean:
+       -rm -rf .build/*
+
+html:
+       mkdir -p .build/html .build/doctrees
+       $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) .build/html
+       @echo
+       @echo "Build finished. The HTML pages are in .build/html."
+
+pickle:
+       mkdir -p .build/pickle .build/doctrees
+       $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) .build/pickle
+       @echo
+       @echo "Build finished; now you can process the pickle files."
+
+web: pickle
+
+json:
+       mkdir -p .build/json .build/doctrees
+       $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) .build/json
+       @echo
+       @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+       mkdir -p .build/htmlhelp .build/doctrees
+       $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) .build/htmlhelp
+       @echo
+       @echo "Build finished; now you can run HTML Help Workshop with the" \
+             ".hhp project file in .build/htmlhelp."
+
+latex:
+       mkdir -p .build/latex .build/doctrees
+       $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) .build/latex
+       @echo
+       @echo "Build finished; the LaTeX files are in .build/latex."
+       @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+             "run these through (pdf)latex."
+
+changes:
+       mkdir -p .build/changes .build/doctrees
+       $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) .build/changes
+       @echo
+       @echo "The overview file is in .build/changes."
+
+linkcheck:
+       mkdir -p .build/linkcheck .build/doctrees
+       $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) .build/linkcheck
+       @echo
+       @echo "Link check complete; look for any errors in the above output " \
+             "or in .build/linkcheck/output.txt."
+
+man:
+	mkdir -p .build/man .build/doctrees
+       $(SPHINXBUILD) -b manpage $(ALLSPHINXOPTS) .build/man man.rst
+       cp .build/man/man.man ../nosetests.1
+       @echo
+       @echo "Generated man page copied to ../nosetests.1"
+
+readme:
+	mkdir -p .build/text .build/doctrees
+       $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) .build/text usage.rst
+       cp .build/text/usage.txt ../README.txt
+       @echo
+       @echo "Updated ../README.txt"
diff --git a/doc/api.rst b/doc/api.rst
new file mode 100644 (file)
index 0000000..b2dd665
--- /dev/null
@@ -0,0 +1,20 @@
+nose internals
+==============
+
+.. toctree ::
+   :maxdepth: 2
+
+   api/core
+   api/loader
+   api/selector
+   api/config
+   api/test_cases
+   api/suite
+   api/result
+   api/proxy
+   api/plugin_manager   
+   api/importer
+   api/commands
+   api/twistedtools
+   api/inspector
+   api/util
diff --git a/doc/api/commands.rst b/doc/api/commands.rst
new file mode 100644 (file)
index 0000000..c9ae14a
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.commands
+   :members:
\ No newline at end of file
diff --git a/doc/api/config.rst b/doc/api/config.rst
new file mode 100644 (file)
index 0000000..077cf9b
--- /dev/null
@@ -0,0 +1,5 @@
+Configuration
+=============
+
+.. automodule :: nose.config
+   :members:
\ No newline at end of file
diff --git a/doc/api/core.rst b/doc/api/core.rst
new file mode 100644 (file)
index 0000000..6e58329
--- /dev/null
@@ -0,0 +1,5 @@
+Test runner and main()
+======================
+
+.. automodule :: nose.core
+   :members:
diff --git a/doc/api/importer.rst b/doc/api/importer.rst
new file mode 100644 (file)
index 0000000..956fdb3
--- /dev/null
@@ -0,0 +1,5 @@
+Importer
+========
+
+.. automodule :: nose.importer
+   :members:
\ No newline at end of file
diff --git a/doc/api/inspector.rst b/doc/api/inspector.rst
new file mode 100644 (file)
index 0000000..e204985
--- /dev/null
@@ -0,0 +1,5 @@
+Traceback inspector
+===================
+
+.. automodule :: nose.inspector
+   :members:
\ No newline at end of file
diff --git a/doc/api/loader.rst b/doc/api/loader.rst
new file mode 100644 (file)
index 0000000..741dd22
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.loader
+   :members:
\ No newline at end of file
diff --git a/doc/api/plugin_manager.rst b/doc/api/plugin_manager.rst
new file mode 100644 (file)
index 0000000..5c1e393
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.plugins.manager
+   :members:
\ No newline at end of file
diff --git a/doc/api/proxy.rst b/doc/api/proxy.rst
new file mode 100644 (file)
index 0000000..1802074
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.proxy
+   :members:
\ No newline at end of file
diff --git a/doc/api/result.rst b/doc/api/result.rst
new file mode 100644 (file)
index 0000000..75b110d
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.result
+   :members:
\ No newline at end of file
diff --git a/doc/api/selector.rst b/doc/api/selector.rst
new file mode 100644 (file)
index 0000000..d3de5a4
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.selector
+   :members:
\ No newline at end of file
diff --git a/doc/api/suite.rst b/doc/api/suite.rst
new file mode 100644 (file)
index 0000000..9b764b0
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.suite
+   :members:
\ No newline at end of file
diff --git a/doc/api/test_cases.rst b/doc/api/test_cases.rst
new file mode 100644 (file)
index 0000000..2508f54
--- /dev/null
@@ -0,0 +1,8 @@
+Test Cases
+==========
+
+.. automodule :: nose.case
+   :members:
+
+.. autoclass :: nose.failure.Failure
+   :members:
diff --git a/doc/api/twistedtools.rst b/doc/api/twistedtools.rst
new file mode 100644 (file)
index 0000000..584d9c7
--- /dev/null
@@ -0,0 +1,2 @@
+.. automodule :: nose.twistedtools
+   :members:
\ No newline at end of file
diff --git a/doc/api/util.rst b/doc/api/util.rst
new file mode 100644 (file)
index 0000000..f4b683e
--- /dev/null
@@ -0,0 +1,5 @@
+Utility functions
+=================
+
+.. automodule :: nose.util
+   :members:
\ No newline at end of file
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644 (file)
index 0000000..3fb4326
--- /dev/null
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+#
+# nose documentation build configuration file, created by
+# sphinx-quickstart on Thu Mar 26 16:49:00 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed automatically).
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+
+# need to be brutal because of easy_install's pth hacks:
+sys.path.insert(0, 
+    os.path.join(os.path.dirname(__file__), '..'))
+sys.path.insert(0, os.path.abspath('.'))
+
+
+# General configuration
+# ---------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
+              'nose.sphinx.pluginopts', 'manbuilder']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'nose'
+copyright = u'2009, Jason Pellerin'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.1'
+# The full version, including alpha/beta/rc tags.
+release = '1.1.2'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['.build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'trac'
+
+
+# Options for HTML output
+# -----------------------
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+html_style = 'nose.css'
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['.static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {
+    'index': 'indexsidebar.html'
+    }
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {'index': 'index.html'}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+#html_copy_source = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'nosedoc'
+
+"""
+footerbgcolor (CSS color): Background color for the footer line.
+footertextcolor (CSS color): Text color for the footer line.
+sidebarbgcolor (CSS color): Background color for the sidebar.
+sidebartextcolor (CSS color): Text color for the sidebar.
+sidebarlinkcolor (CSS color): Link color for the sidebar.
+relbarbgcolor (CSS color): Background color for the relation bar.
+relbartextcolor (CSS color): Text color for the relation bar.
+relbarlinkcolor (CSS color): Link color for the relation bar.
+bgcolor (CSS color): Body background color.
+textcolor (CSS color): Body text color.
+linkcolor (CSS color): Body link color.
+headbgcolor (CSS color): Background color for headings.
+headtextcolor (CSS color): Text color for headings.
+headlinkcolor (CSS color): Link color for headings.
+codebgcolor (CSS color): Background color for code blocks.
+codetextcolor (CSS color): Default text color for code blocks, if not set differently by the highlighting style.
+bodyfont (CSS font-family): Font for normal text.
+headfont (CSS font-family): Font for headings.
+"""
+html_theme_options = {
+    'rightsidebar': 'true',
+    'sidebarbgcolor': '#fff',
+    'sidebartextcolor': '#20435c',
+    'sidebarlinkcolor': '#355f7c',
+    'bgcolor': '#fff',
+    'codebgcolor': '#ffe',
+    'headbgcolor': '#fff',
+    'relbarbgcolor': '#fff',
+    'relbartextcolor': '#20435c',
+    'relbarlinkcolor': '#355f7c',
+}
+
+# the css mostly overrides this:
+html_theme = 'default'
+    
+# Options for LaTeX output
+# ------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, document class [howto/manual]).
+latex_documents = [
+  ('index', 'nose.tex', ur'nose Documentation',
+   ur'Jason Pellerin', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/dev': None}
diff --git a/doc/contributing.rst b/doc/contributing.rst
new file mode 100644 (file)
index 0000000..1a74ca8
--- /dev/null
@@ -0,0 +1,58 @@
+Contributing to nose
+====================
+
+You'd like to contribute to nose? Great! Now that nose is hosted under
+`Mercurial <http://selenic.com/mercurial/>`__, contributing is even easier.
+
+Get the code!
+-------------
+
+Start by getting a local working copy of nose, either stable, from google code::
+
+  hg clone http://python-nose.googlecode.com/hg/ nose-stable
+
+or unstable, from bitbucket::
+
+  hg clone http://bitbucket.org/jpellerin/nose/ nose-unstable
+
+If you plan to submit changes back to the core repository, you should set up a
+public repository of your own somewhere. `Bitbucket <http://bitbucket.org>`__
+is a good place to do that. Once you've set up your bitbucket nose repository,
+if working from **stable**, pull from your working copy of nose-stable, and push
+to bitbucket. That (with occasional merging) will be your normal practice for
+keeping your repository up to date. If you're on bitbucket and working from
+**unstable**, just **fork** http://bitbucket.org/jpellerin/nose/.
+
+Running nose's tests
+--------------------
+
+nose runs its own test suite with `tox
+<http://codespeak.net/tox/>`__. You don't have to install tox to run
+nose's test suite, but you should, because tox makes it easy to run
+all tests on all supported python versions. You'll also need python
+2.4, 2.5, 2.6, 2.7, 3.1 and jython installed somewhere in your $PATH.
+
+Discuss
+-------
+
+Join the `nose developer list
+<http://groups.google.com/group/nose-dev>`__ at google groups. It's
+low-traffic and mostly signal.
+
+What to work on?
+----------------
+
+You can find a list of open issues at nose's `google code repository
+<http://code.google.com/p/python-nose/issues>`__. If you'd like to
+work on an issue, leave a comment on the issue detailing how you plan
+to fix it, and where to find the Mercurial repository where you will
+publish your changes.
+
+I have a great idea for a plugin...
+-----------------------------------
+
+Great! :doc:`Write it <plugins/writing>`. Release it on `pypi
+<http://pypi.python.org>`__. If it gains a large following, and
+becomes stable enough to work with nose's 6+ month release cycles, it
+may be a good candidate for inclusion in nose's builtin plugins.
+
diff --git a/doc/developing.rst b/doc/developing.rst
new file mode 100644 (file)
index 0000000..3302ddf
--- /dev/null
@@ -0,0 +1,30 @@
+Developing with nose
+====================
+
+Get the code
+------------
+
+The stable branch of nose is hosted at `google code
+<http://code.google.com/p/python-nose/>`__. You should clone this
+branch if you're developing a plugin or working on bug fixes for nose::
+
+  hg clone http://python-nose.googlecode.com/hg/ nose-stable
+
+The **unstable** branch of nose is hosted at `bitbucket
+<http://bitbucket.org/jpellerin/nose/>`__. You should **fork** this branch if
+you are developing new features for nose. Then clone your fork, and submit
+your changes as a pull request. If you just want to use unstable, you can
+clone the branch::
+
+  hg clone http://bitbucket.org/jpellerin/nose/ nose-unstable
+
+
+Read
+----
+
+.. toctree ::
+   :maxdepth: 2
+
+   plugins
+   api
+   contributing
diff --git a/doc/doc_tests/test_addplugins/support/test$py.class b/doc/doc_tests/test_addplugins/support/test$py.class
new file mode 100644 (file)
index 0000000..f4dea0b
Binary files /dev/null and b/doc/doc_tests/test_addplugins/support/test$py.class differ
diff --git a/doc/doc_tests/test_addplugins/support/test.py b/doc/doc_tests/test_addplugins/support/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/doc/doc_tests/test_addplugins/support/test.pyc b/doc/doc_tests/test_addplugins/support/test.pyc
new file mode 100644 (file)
index 0000000..0e4b6ad
Binary files /dev/null and b/doc/doc_tests/test_addplugins/support/test.pyc differ
diff --git a/doc/doc_tests/test_addplugins/test_addplugins.rst b/doc/doc_tests/test_addplugins/test_addplugins.rst
new file mode 100644 (file)
index 0000000..8e8da91
--- /dev/null
@@ -0,0 +1,80 @@
+Using custom plugins without setuptools
+---------------------------------------
+
+If you have one or more custom plugins that you'd like to use with nose, but
+can't or don't want to register that plugin as a setuptools entrypoint, you
+can use the ``addplugins`` keyword argument to :func:`nose.core.main` or
+:func:`nose.core.run` to make the plugins available.
+
+To do this you would construct a launcher script for nose, something like::
+
+  from nose import main
+  from yourpackage import YourPlugin, YourOtherPlugin
+
+  if __name__ == '__main__':
+      main(addplugins=[YourPlugin(), YourOtherPlugin()])
+
+Here's an example. Say that you don't like the fact that the collect-only
+plugin outputs 'ok' for each test it finds; instead you want it to output
+'maybe.' You could modify the plugin itself, or instead, create a Maybe plugin
+that transforms the output into your desired shape.
+
+Without the plugin, we get 'ok.'
+
+>>> import os
+>>> support = os.path.join(os.path.dirname(__file__), 'support')
+>>> from nose.plugins.plugintest import run_buffered as run
+>>> argv = [__file__, '-v', support] # --collect-only
+>>> run(argv=argv)
+test.test ... ok
+<BLANKLINE>
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
+Without '-v', we get a dot.
+
+>>> run(argv=[__file__, support])
+.
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
+The plugin is simple. It captures and wraps the test result output stream and
+replaces 'ok' with 'maybe' and '.' with '?'.
+
+>>> from nose.plugins.base import Plugin
+>>> class Maybe(Plugin):
+...     def setOutputStream(self, stream):
+...         self.stream = stream
+...         return self
+...     def flush(self):
+...         self.stream.flush()
+...     def writeln(self, out=""):
+...         self.write(out + "\n")
+...     def write(self, out):
+...         if out == "ok\n":
+...             out = "maybe\n"
+...         elif out == ".":
+...             out = "?"
+...         self.stream.write(out)
+
+To activate the plugin, we pass an instance in the addplugins list.
+
+>>> run(argv=argv + ['--with-maybe'], addplugins=[Maybe()])
+test.test ... maybe
+<BLANKLINE>
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
+>>> run(argv=[__file__, support, '--with-maybe'], addplugins=[Maybe()])
+?
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
diff --git a/doc/doc_tests/test_allmodules/support/mod$py.class b/doc/doc_tests/test_allmodules/support/mod$py.class
new file mode 100644 (file)
index 0000000..6322627
Binary files /dev/null and b/doc/doc_tests/test_allmodules/support/mod$py.class differ
diff --git a/doc/doc_tests/test_allmodules/support/mod.py b/doc/doc_tests/test_allmodules/support/mod.py
new file mode 100644 (file)
index 0000000..e136d56
--- /dev/null
@@ -0,0 +1,5 @@
+def test():
+    pass
+
+def test_fails():
+    assert False, "This test fails"
diff --git a/doc/doc_tests/test_allmodules/support/mod.pyc b/doc/doc_tests/test_allmodules/support/mod.pyc
new file mode 100644 (file)
index 0000000..17ebf6e
Binary files /dev/null and b/doc/doc_tests/test_allmodules/support/mod.pyc differ
diff --git a/doc/doc_tests/test_allmodules/support/test$py.class b/doc/doc_tests/test_allmodules/support/test$py.class
new file mode 100644 (file)
index 0000000..2d22e0d
Binary files /dev/null and b/doc/doc_tests/test_allmodules/support/test$py.class differ
diff --git a/doc/doc_tests/test_allmodules/support/test.py b/doc/doc_tests/test_allmodules/support/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/doc/doc_tests/test_allmodules/support/test.pyc b/doc/doc_tests/test_allmodules/support/test.pyc
new file mode 100644 (file)
index 0000000..5648105
Binary files /dev/null and b/doc/doc_tests/test_allmodules/support/test.pyc differ
diff --git a/doc/doc_tests/test_allmodules/test_allmodules.rst b/doc/doc_tests/test_allmodules/test_allmodules.rst
new file mode 100644 (file)
index 0000000..b541987
--- /dev/null
@@ -0,0 +1,67 @@
+Finding tests in all modules
+============================
+
+Normally, nose only looks for tests in modules whose names match testMatch. By
+default that means modules with 'test' or 'Test' at the start of the name
+after an underscore (_) or dash (-) or other non-alphanumeric character.
+
+If you want to collect tests from all modules, use the ``--all-modules``
+command line argument to activate the :doc:`allmodules plugin
+<../../plugins/allmodules>`.
+
+.. Note ::
+
+   The function :func:`nose.plugins.plugintest.run` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> argv = [__file__, '-v', support]
+
+The target directory contains a test module and a normal module.
+
+    >>> support_files = [d for d in os.listdir(support)
+    ...                  if not d.startswith('.')
+    ...                  and d.endswith('.py')]
+    >>> support_files.sort()
+    >>> support_files
+    ['mod.py', 'test.py']
+
+When run without ``--all-modules``, only the test module is examined for tests.
+
+    >>> run(argv=argv)
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+When ``--all-modules`` is active, both modules are examined.
+
+    >>> from nose.plugins.allmodules import AllModules
+    >>> argv = [__file__, '-v', '--all-modules', support]
+    >>> run(argv=argv, plugins=[AllModules()]) # doctest: +REPORT_NDIFF
+    mod.test ... ok
+    mod.test_fails ... FAIL
+    test.test ... ok
+    <BLANKLINE>
+    ======================================================================
+    FAIL: mod.test_fails
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AssertionError: This test fails
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    FAILED (failures=1)
+
+
+
diff --git a/doc/doc_tests/test_coverage_html/coverage_html.rst b/doc/doc_tests/test_coverage_html/coverage_html.rst
new file mode 100644 (file)
index 0000000..95f9e8a
--- /dev/null
@@ -0,0 +1,57 @@
+Generating HTML Coverage with nose
+----------------------------------
+
+.. Note ::
+
+    HTML coverage requires Ned Batchelder's `coverage.py`_ module.
+..
+
+Console coverage output is useful but terse. For a more browseable view of
+code coverage, the coverage plugin supports basic HTML coverage output.
+
+.. hide this from the actual documentation:
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> cover_html_dir = os.path.join(support, 'cover')
+    >>> cover_file = os.path.join(os.getcwd(), '.coverage')
+    >>> if os.path.exists(cover_file):
+    ...     os.unlink(cover_file)
+    ...
+
+
+The console coverage output is printed, as normal.
+
+    >>> from nose.plugins.cover import Coverage
+    >>> cover_html_dir = os.path.join(support, 'cover')
+    >>> run(argv=[__file__, '-v', '--with-coverage', '--cover-package=blah', 
+    ...           '--cover-html', '--cover-html-dir=' + cover_html_dir,
+    ...           support, ], 
+    ...     plugins=[Coverage()]) # doctest: +REPORT_NDIFF
+    test_covered.test_blah ... hi
+    ok
+    <BLANKLINE>
+    Name    Stmts   Miss  Cover   Missing
+    -------------------------------------
+    blah        4      1    75%   6
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+The html coverage reports are saved to disk in the directory specified by the
+``--cover-html-dir`` option. In that directory you'll find ``index.html``
+which links to a detailed coverage report for each module in the report. The
+detail pages show the module source, colorized to indicate which lines are
+covered and which are not. There is an example of this HTML output in the
+`coverage.py`_ docs.
+
+.. hide this from the actual documentation:
+    >>> os.path.exists(cover_file)
+    True
+    >>> os.path.exists(os.path.join(cover_html_dir, 'index.html'))
+    True
+    >>> os.path.exists(os.path.join(cover_html_dir, 'blah.html'))
+    True
+
+.. _`coverage.py`: http://nedbatchelder.com/code/coverage/
diff --git a/doc/doc_tests/test_coverage_html/coverage_html.rst.py3.patch b/doc/doc_tests/test_coverage_html/coverage_html.rst.py3.patch
new file mode 100644 (file)
index 0000000..f325a01
--- /dev/null
@@ -0,0 +1,16 @@
+--- coverage_html.rst.orig     2010-08-31 23:13:33.000000000 -0700
++++ coverage_html.rst  2010-08-31 23:14:25.000000000 -0700
+@@ -78,11 +78,11 @@
+     </div>
+     <div class="coverage">
+     <div class="cov"><span class="num"><pre>1</pre></span><pre>def dostuff():</pre></div>
+-    <div class="cov"><span class="num"><pre>2</pre></span><pre>    print 'hi'</pre></div>
++    <div class="cov"><span class="num"><pre>2</pre></span><pre>    print('hi')</pre></div>
+     <div class="skip"><span class="num"><pre>3</pre></span><pre></pre></div>
+     <div class="skip"><span class="num"><pre>4</pre></span><pre></pre></div>
+     <div class="cov"><span class="num"><pre>5</pre></span><pre>def notcov():</pre></div>
+-    <div class="nocov"><span class="num"><pre>6</pre></span><pre>    print 'not covered'</pre></div>
++    <div class="nocov"><span class="num"><pre>6</pre></span><pre>    print('not covered')</pre></div>
+     <div class="skip"><span class="num"><pre>7</pre></span><pre></pre></div>
+     </div>
+     </body>
diff --git a/doc/doc_tests/test_coverage_html/coverage_html_fixtures$py.class b/doc/doc_tests/test_coverage_html/coverage_html_fixtures$py.class
new file mode 100644 (file)
index 0000000..79f8015
Binary files /dev/null and b/doc/doc_tests/test_coverage_html/coverage_html_fixtures$py.class differ
diff --git a/doc/doc_tests/test_coverage_html/coverage_html_fixtures.py b/doc/doc_tests/test_coverage_html/coverage_html_fixtures.py
new file mode 100644 (file)
index 0000000..6829dc2
--- /dev/null
@@ -0,0 +1,26 @@
+import sys
+import os
+import shutil
+from nose.plugins.skip import SkipTest
+from nose.plugins.cover import Coverage
+from nose.plugins.plugintest import munge_nose_output_for_doctest
+
+# This fixture is not reentrant because we have to cleanup the files that
+# coverage produces once all tests have finished running.
+_multiprocess_shared_ = True
+
+def setup_module():
+    try:
+        import coverage
+        if 'active' in Coverage.status:
+            raise SkipTest("Coverage plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("coverage module not available")
+
+def teardown_module():
+    # Clean up the files produced by coverage
+    cover_html_dir = os.path.join(os.path.dirname(__file__), 'support', 'cover')
+    if os.path.exists(cover_html_dir):
+        shutil.rmtree(cover_html_dir)
+
diff --git a/doc/doc_tests/test_coverage_html/coverage_html_fixtures.pyc b/doc/doc_tests/test_coverage_html/coverage_html_fixtures.pyc
new file mode 100644 (file)
index 0000000..00eee1b
Binary files /dev/null and b/doc/doc_tests/test_coverage_html/coverage_html_fixtures.pyc differ
diff --git a/doc/doc_tests/test_coverage_html/support/blah.py b/doc/doc_tests/test_coverage_html/support/blah.py
new file mode 100644 (file)
index 0000000..ef6657c
--- /dev/null
@@ -0,0 +1,6 @@
+def dostuff():
+    print 'hi'
+
+
+def notcov():
+    print 'not covered'
diff --git a/doc/doc_tests/test_coverage_html/support/blah.pyc b/doc/doc_tests/test_coverage_html/support/blah.pyc
new file mode 100644 (file)
index 0000000..923a02c
Binary files /dev/null and b/doc/doc_tests/test_coverage_html/support/blah.pyc differ
diff --git a/doc/doc_tests/test_coverage_html/support/tests/test_covered.py b/doc/doc_tests/test_coverage_html/support/tests/test_covered.py
new file mode 100644 (file)
index 0000000..c669c5c
--- /dev/null
@@ -0,0 +1,4 @@
+import blah
+
+def test_blah():
+    blah.dostuff()
diff --git a/doc/doc_tests/test_coverage_html/support/tests/test_covered.pyc b/doc/doc_tests/test_coverage_html/support/tests/test_covered.pyc
new file mode 100644 (file)
index 0000000..b4d933c
Binary files /dev/null and b/doc/doc_tests/test_coverage_html/support/tests/test_covered.pyc differ
diff --git a/doc/doc_tests/test_doctest_fixtures/doctest_fixtures.rst b/doc/doc_tests/test_doctest_fixtures/doctest_fixtures.rst
new file mode 100644 (file)
index 0000000..6ff8fed
--- /dev/null
@@ -0,0 +1,122 @@
+Doctest Fixtures
+----------------
+
+Doctest files, like other tests, can be made more efficient or meaningful or
+at least easier to write by judicious use of fixtures. nose supports limited
+fixtures for use with doctest files. 
+
+Module-level fixtures
+=====================
+
+Fixtures for a doctest file may define any or all of the following methods for
+module-level setup:
+
+* setup
+* setup_module
+* setupModule
+* setUpModule
+
+Each module-level setup function may optionally take a single argument, the
+fixtures module itself.
+
+Example::
+
+  def setup_module(module):
+      module.called[:] = []
+
+Similarly, module-level teardown methods are available, which also optionally
+take the fixtures module as an argument:
+      
+* teardown
+* teardown_module
+* teardownModule
+* tearDownModule
+
+Example::
+
+  def teardown_module(module):
+      module.called[:] = []
+      module.done = True
+
+Module-level setup executes **before any tests are loaded** from the doctest
+file. This is the right place to raise :class:`nose.plugins.skip.SkipTest`,
+for example.
+      
+Test-level fixtures
+===================
+
+In addition to module-level fixtures, *test*-level fixtures are
+supported. Keep in mind that in the doctest lexicon, the *test* is the *entire
+doctest file* -- not each individual example within the file. So, like the
+module-level fixtures, test-level fixtures execute *once per file*. The
+differences are that:
+
+- test-level fixtures execute **after** tests have been loaded, but **before**
+  any tests have executed.
+- test-level fixtures receive the doctest :class:`doctest.DocFileCase` loaded
+  from the file as their one *required* argument.
+      
+**setup_test(test)** is called before the test is run.
+
+Example::
+
+  def setup_test(test):
+      called.append(test)
+      test.globs['count'] = len(called)
+  setup_test.__test__ = False
+      
+**teardown_test(test)** is called after the test, unless setup raised an
+uncaught exception. The argument is the :class:`doctest.DocFileCase` object,
+*not* a unittest.TestCase.
+
+Example::
+
+  def teardown_test(test):
+      pass
+  teardown_test.__test__ = False
+  
+Bottom line: setup_test, teardown_test have access to the *doctest test*,
+while setup, setup_module, etc have access to the *fixture*
+module. setup_module runs before tests are loaded, setup_test after.
+
+.. note ::
+
+   As in the examples, it's a good idea to tag your setup_test/teardown_test
+   functions with ``__test__ = False`` to avoid them being collected as tests.
+
+Lastly, the fixtures for a doctest file may supply a **globs(globs)**
+function. The dict returned by this function will be passed to the doctest
+runner as the globals available to the test. You can use this, for example, to
+easily inject a module's globals into a doctest that has been moved from the
+module to a separate file. 
+
+Example
+=======
+
+This doctest has some simple fixtures:
+
+.. include :: doctest_fixtures_fixtures.py
+   :literal:
+
+The ``globs`` defined in the fixtures make the variable ``something``
+available in all examples.
+   
+    >>> something
+    'Something?'
+
+The ``count`` variable is injected by the test-level fixture.
+    
+    >>> count
+    1
+
+.. warning ::
+
+  This whole file is one doctest test. setup_test doesn't do what you think!
+  It exists to give you access to the test case and examples, but it runs
+  *once*, before all of them, not before each.
+
+    >>> count
+    1
+
+  Thus, ``count`` stays 1 throughout the test, no matter how many examples it
+  includes.
\ No newline at end of file
diff --git a/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class b/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class
new file mode 100644 (file)
index 0000000..eeeff8a
Binary files /dev/null and b/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class differ
diff --git a/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.py b/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.py
new file mode 100644 (file)
index 0000000..72fbfd7
--- /dev/null
@@ -0,0 +1,17 @@
+called = []
+
+def globs(globs):
+    globs['something'] = 'Something?'
+    return globs
+
+def setup_module(module):
+    module.called[:] = []
+
+def setup_test(test):
+    called.append(test)
+    test.globs['count'] = len(called)
+setup_test.__test__ = False
+    
+def teardown_test(test):
+    pass
+teardown_test.__test__ = False
diff --git a/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc b/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc
new file mode 100644 (file)
index 0000000..bf8a880
Binary files /dev/null and b/doc/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc differ
diff --git a/doc/doc_tests/test_init_plugin/example.cfg b/doc/doc_tests/test_init_plugin/example.cfg
new file mode 100644 (file)
index 0000000..b02ac0e
--- /dev/null
@@ -0,0 +1,3 @@
+[DEFAULT]
+can_frobnicate = 1
+likes_cheese = 0
diff --git a/doc/doc_tests/test_init_plugin/init_plugin.rst b/doc/doc_tests/test_init_plugin/init_plugin.rst
new file mode 100644 (file)
index 0000000..6c64029
--- /dev/null
@@ -0,0 +1,164 @@
+Running Initialization Code Before the Test Run
+-----------------------------------------------
+
+Many applications, especially those using web frameworks like Pylons_
+or Django_, can't be tested without first being configured or
+otherwise initialized. Plugins can fulfill this requirement by
+implementing :meth:`begin() <nose.plugins.base.IPluginInterface.begin>`.
+
+In this example, we'll use a very simple example: a widget class that
+can't be tested without a configuration.
+
+Here's the widget class. It's configured at the class or instance
+level by setting the ``cfg`` attribute to a dictionary.
+
+    >>> class ConfigurableWidget(object):
+    ...     cfg = None
+    ...     def can_frobnicate(self):
+    ...         return self.cfg.get('can_frobnicate', True)
+    ...     def likes_cheese(self):
+    ...         return self.cfg.get('likes_cheese', True)
+
+The tests verify that the widget's methods can be called without
+raising any exceptions.
+
+    >>> import unittest
+    >>> class TestConfigurableWidget(unittest.TestCase):
+    ...     longMessage = False
+    ...     def setUp(self):
+    ...         self.widget = ConfigurableWidget()
+    ...     def test_can_frobnicate(self):
+    ...         """Widgets can frobnicate (or not)"""
+    ...         self.widget.can_frobnicate()
+    ...     def test_likes_cheese(self):
+    ...         """Widgets might like cheese"""
+    ...         self.widget.likes_cheese()
+    ...     def shortDescription(self): # 2.7 compat
+    ...         try:
+    ...             doc = self._testMethodDoc
+    ...         except AttributeError:
+    ...             # 2.4 compat
+    ...             doc = self._TestCase__testMethodDoc
+    ...         return doc and doc.split("\n")[0].strip() or None
+
+The tests are bundled into a suite that we can pass to the test runner.
+
+    >>> def suite():
+    ...     return unittest.TestSuite([
+    ...         TestConfigurableWidget('test_can_frobnicate'),
+    ...         TestConfigurableWidget('test_likes_cheese')])
+
+When we run tests without first configuring the ConfigurableWidget,
+the tests fail.
+
+.. Note ::
+
+   The function :func:`nose.plugins.plugintest.run` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> argv = [__file__, '-v']
+    >>> run(argv=argv, suite=suite())  # doctest: +REPORT_NDIFF
+    Widgets can frobnicate (or not) ... ERROR
+    Widgets might like cheese ... ERROR
+    <BLANKLINE>
+    ======================================================================
+    ERROR: Widgets can frobnicate (or not)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError: 'NoneType' object has no attribute 'get'
+    <BLANKLINE>
+    ======================================================================
+    ERROR: Widgets might like cheese
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError: 'NoneType' object has no attribute 'get'
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=2)
+
+To configure the widget system before running tests, write a plugin
+that implements :meth:`begin() <nose.plugins.base.IPluginInterface.begin>`
+and initializes the system with a hard-coded configuration. (Later, we'll
+write a better plugin that accepts a command-line argument specifying the
+configuration file.)
+
+    >>> from nose.plugins import Plugin
+    >>> class ConfiguringPlugin(Plugin):
+    ...     enabled = True
+    ...     def configure(self, options, conf):
+    ...         pass # always on
+    ...     def begin(self):
+    ...         ConfigurableWidget.cfg = {}
+
+Now configure and execute a new test run using the plugin, which will
+inject the hard-coded configuration.
+
+    >>> run(argv=argv, suite=suite(),
+    ...     plugins=[ConfiguringPlugin()])  # doctest: +REPORT_NDIFF
+    Widgets can frobnicate (or not) ... ok
+    Widgets might like cheese ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+This time the tests pass, because the widget class is configured.
+
+But the ConfiguringPlugin is pretty lame -- the configuration it
+installs is hard coded. A better plugin would allow the user to
+specify a configuration file on the command line:
+
+    >>> class BetterConfiguringPlugin(Plugin):
+    ...     def options(self, parser, env={}):
+    ...         parser.add_option('--widget-config', action='store',
+    ...                           dest='widget_config', default=None,
+    ...                           help='Specify path to widget config file')
+    ...     def configure(self, options, conf):
+    ...         if options.widget_config:
+    ...             self.load_config(options.widget_config)
+    ...             self.enabled = True
+    ...     def begin(self):
+    ...         ConfigurableWidget.cfg = self.cfg
+    ...     def load_config(self, path):
+    ...         from ConfigParser import ConfigParser
+    ...         p = ConfigParser()
+    ...         p.read([path])
+    ...         self.cfg = dict(p.items('DEFAULT'))
+
+To use the plugin, we need a config file.
+
+    >>> import os
+    >>> cfg_file = os.path.join(os.path.dirname(__file__), 'example.cfg')
+    >>> bytes = open(cfg_file, 'w').write("""\
+    ... [DEFAULT]
+    ... can_frobnicate = 1
+    ... likes_cheese = 0
+    ... """)
+
+Now we can execute a test run using that configuration, after first
+resetting the widget system to an unconfigured state.
+
+    >>> ConfigurableWidget.cfg = None
+    >>> argv = [__file__, '-v', '--widget-config', cfg_file]
+    >>> run(argv=argv, suite=suite(),
+    ...     plugins=[BetterConfiguringPlugin()]) # doctest: +REPORT_NDIFF
+    Widgets can frobnicate (or not) ... ok
+    Widgets might like cheese ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+.. _Pylons: http://pylonshq.com/
+.. _Django: http://www.djangoproject.com/
diff --git a/doc/doc_tests/test_init_plugin/init_plugin.rst.py3.patch b/doc/doc_tests/test_init_plugin/init_plugin.rst.py3.patch
new file mode 100644 (file)
index 0000000..90a0a44
--- /dev/null
@@ -0,0 +1,10 @@
+--- init_plugin.rst.orig       2010-08-31 10:36:54.000000000 -0700
++++ init_plugin.rst    2010-08-31 10:37:30.000000000 -0700
+@@ -143,6 +143,7 @@
+     ... can_frobnicate = 1
+     ... likes_cheese = 0
+     ... """)
++    46
+ Now we can execute a test run using that configuration, after first
+ resetting the widget system to an unconfigured state.
diff --git a/doc/doc_tests/test_issue089/support/unwanted_package/__init__$py.class b/doc/doc_tests/test_issue089/support/unwanted_package/__init__$py.class
new file mode 100644 (file)
index 0000000..1ccf3e4
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/unwanted_package/__init__$py.class differ
diff --git a/doc/doc_tests/test_issue089/support/unwanted_package/__init__.py b/doc/doc_tests/test_issue089/support/unwanted_package/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/doc/doc_tests/test_issue089/support/unwanted_package/__init__.pyc b/doc/doc_tests/test_issue089/support/unwanted_package/__init__.pyc
new file mode 100644 (file)
index 0000000..d3e47f0
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/unwanted_package/__init__.pyc differ
diff --git a/doc/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class b/doc/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class
new file mode 100644 (file)
index 0000000..2998cfd
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class differ
diff --git a/doc/doc_tests/test_issue089/support/unwanted_package/test_spam.py b/doc/doc_tests/test_issue089/support/unwanted_package/test_spam.py
new file mode 100644 (file)
index 0000000..cfd1cc1
--- /dev/null
@@ -0,0 +1,3 @@
+def test_spam():
+    assert True
+
diff --git a/doc/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc b/doc/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc
new file mode 100644 (file)
index 0000000..5598905
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc differ
diff --git a/doc/doc_tests/test_issue089/support/wanted_package/__init__$py.class b/doc/doc_tests/test_issue089/support/wanted_package/__init__$py.class
new file mode 100644 (file)
index 0000000..bb74c14
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/wanted_package/__init__$py.class differ
diff --git a/doc/doc_tests/test_issue089/support/wanted_package/__init__.py b/doc/doc_tests/test_issue089/support/wanted_package/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/doc/doc_tests/test_issue089/support/wanted_package/__init__.pyc b/doc/doc_tests/test_issue089/support/wanted_package/__init__.pyc
new file mode 100644 (file)
index 0000000..5bd67e3
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/wanted_package/__init__.pyc differ
diff --git a/doc/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class b/doc/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class
new file mode 100644 (file)
index 0000000..b32d690
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class differ
diff --git a/doc/doc_tests/test_issue089/support/wanted_package/test_eggs.py b/doc/doc_tests/test_issue089/support/wanted_package/test_eggs.py
new file mode 100644 (file)
index 0000000..bb65550
--- /dev/null
@@ -0,0 +1,3 @@
+def test_eggs():
+    assert True
+
diff --git a/doc/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc b/doc/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc
new file mode 100644 (file)
index 0000000..c12ec5f
Binary files /dev/null and b/doc/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc differ
diff --git a/doc/doc_tests/test_issue089/unwanted_package.rst b/doc/doc_tests/test_issue089/unwanted_package.rst
new file mode 100644 (file)
index 0000000..c7efc27
--- /dev/null
@@ -0,0 +1,70 @@
+Excluding Unwanted Packages
+---------------------------
+
+Normally, nose discovery descends into all packages. Plugins can
+change this behavior by implementing :meth:`IPluginInterface.wantDirectory()`.
+
+In this example, we have a wanted package called ``wanted_package``
+and an unwanted package called ``unwanted_package``. 
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> support_files = [d for d in os.listdir(support)
+    ...                  if not d.startswith('.')]
+    >>> support_files.sort()
+    >>> support_files
+    ['unwanted_package', 'wanted_package']
+
+When we run nose normally, tests are loaded from both packages. 
+
+.. Note ::
+
+   The function :func:`nose.plugins.plugintest.run` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> argv = [__file__, '-v', support]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    unwanted_package.test_spam.test_spam ... ok
+    wanted_package.test_eggs.test_eggs ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+To exclude the tests in the unwanted package, we can write a simple
+plugin that implements :meth:`IPluginInterface.wantDirectory()` and returns ``False`` if
+the basename of the directory is ``"unwanted_package"``. This will
+prevent nose from descending into the unwanted package.
+
+    >>> from nose.plugins import Plugin
+    >>> class UnwantedPackagePlugin(Plugin):
+    ...     # no command line arg needed to activate plugin
+    ...     enabled = True
+    ...     name = "unwanted-package"
+    ...     
+    ...     def configure(self, options, conf):
+    ...         pass # always on
+    ...     
+    ...     def wantDirectory(self, dirname):
+    ...         want = None
+    ...         if os.path.basename(dirname) == "unwanted_package":
+    ...             want = False
+    ...         return want
+
+In the next test run we use the plugin, and the unwanted package is
+not discovered.
+
+    >>> run(argv=argv,
+    ...     plugins=[UnwantedPackagePlugin()]) # doctest: +REPORT_NDIFF    
+    wanted_package.test_eggs.test_eggs ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
\ No newline at end of file
diff --git a/doc/doc_tests/test_issue097/plugintest_environment.rst b/doc/doc_tests/test_issue097/plugintest_environment.rst
new file mode 100644 (file)
index 0000000..99b37cf
--- /dev/null
@@ -0,0 +1,160 @@
+nose.plugins.plugintest, os.environ and sys.argv
+------------------------------------------------
+
+:class:`nose.plugins.plugintest.PluginTester` and
+:func:`nose.plugins.plugintest.run` are utilities for testing nose
+plugins.  When testing plugins, it should be possible to control the
+environment seen plugins under test, and that environment should never
+be affected by ``os.environ`` or ``sys.argv``.
+
+    >>> import os
+    >>> import sys
+    >>> import unittest
+    >>> import nose.config
+    >>> from nose.plugins import Plugin
+    >>> from nose.plugins.builtin import FailureDetail, Capture
+    >>> from nose.plugins.plugintest import PluginTester
+
+Our test plugin takes no command-line arguments and simply prints the
+environment it's given by nose.
+
+    >>> class PrintEnvPlugin(Plugin):
+    ...     name = "print-env"
+    ...
+    ...     # no command line arg needed to activate plugin
+    ...     enabled = True
+    ...     def configure(self, options, conf):
+    ...         if not self.can_configure:
+    ...             return
+    ...         self.conf = conf
+    ...
+    ...     def options(self, parser, env={}):
+    ...         print "env:", env
+
+To test the argv, we use a config class that prints the argv it's
+given by nose.  We need to monkeypatch nose.config.Config, so that we
+can test the cases where that is used as the default.
+
+    >>> old_config = nose.config.Config
+    >>> class PrintArgvConfig(old_config):
+    ...
+    ...     def configure(self, argv=None, doc=None):
+    ...         print "argv:", argv
+    ...         old_config.configure(self, argv, doc)
+    >>> nose.config.Config = PrintArgvConfig
+
+The class under test, PluginTester, is designed to be used by
+subclassing.
+
+    >>> class Tester(PluginTester):
+    ...    activate = "-v"
+    ...    plugins = [PrintEnvPlugin(),
+    ...               FailureDetail(),
+    ...               Capture(),
+    ...               ]
+    ...
+    ...    def makeSuite(self):
+    ...        return unittest.TestSuite(tests=[])
+
+For the purposes of this test, we need a known ``os.environ`` and
+``sys.argv``.
+
+    >>> old_environ = os.environ
+    >>> old_argv = sys.argv
+    >>> os.environ = {"spam": "eggs"}
+    >>> sys.argv = ["spamtests"]
+
+PluginTester always uses ``[nosetests, self.activate]`` as its argv.
+If ``env`` is not overridden, the default is an empty ``env``.
+
+    >>> tester = Tester()
+    >>> tester.setUp()
+    argv: ['nosetests', '-v']
+    env: {}
+
+An empty ``env`` is respected...
+
+    >>> class EmptyEnvTester(Tester):
+    ...    env = {}
+    >>> tester = EmptyEnvTester()
+    >>> tester.setUp()
+    argv: ['nosetests', '-v']
+    env: {}
+
+... as is a non-empty ``env``.
+
+    >>> class NonEmptyEnvTester(Tester):
+    ...    env = {"foo": "bar"}
+    >>> tester = NonEmptyEnvTester()
+    >>> tester.setUp()
+    argv: ['nosetests', '-v']
+    env: {'foo': 'bar'}
+
+
+``nose.plugins.plugintest.run()`` should work analogously.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> run(suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+    >>> run(env={},
+    ...     suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+    >>> run(env={"foo": "bar"},
+    ...     suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {'foo': 'bar'}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+An explicit argv parameter is honoured:
+
+    >>> run(argv=["spam"],
+    ...     suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['spam']
+    env: {}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+An explicit config parameter with an env is honoured:
+
+    >>> from nose.plugins.manager import PluginManager
+    >>> manager = PluginManager(plugins=[PrintEnvPlugin()])
+    >>> config = PrintArgvConfig(env={"foo": "bar"}, plugins=manager)
+    >>> run(config=config,
+    ...     suite=unittest.TestSuite(tests=[])) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {'foo': 'bar'}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+
+Clean up.
+
+    >>> os.environ = old_environ
+    >>> sys.argv = old_argv
+    >>> nose.config.Config = old_config
diff --git a/doc/doc_tests/test_issue107/plugin_exceptions.rst b/doc/doc_tests/test_issue107/plugin_exceptions.rst
new file mode 100644 (file)
index 0000000..2c595f0
--- /dev/null
@@ -0,0 +1,149 @@
+When Plugins Fail
+-----------------
+
+Plugin methods should not fail silently. When a plugin method raises
+an exception before or during the execution of a test, the exception
+will be wrapped in a :class:`nose.failure.Failure` instance and appear as a
+failing test. Exceptions raised at other times, such as in the
+preparation phase with ``prepareTestLoader`` or ``prepareTestResult``,
+or after a test executes, in ``afterTest`` will stop the entire test
+run.
+
+    >>> import os
+    >>> import sys
+    >>> from nose.plugins import Plugin
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+Our first test plugins take no command-line arguments and raises
+AttributeError in beforeTest and afterTest. 
+
+    >>> class EnabledPlugin(Plugin):
+    ...     """Plugin that takes no command-line arguments"""
+    ...
+    ...     enabled = True
+    ...
+    ...     def configure(self, options, conf):
+    ...         pass
+    ...     def options(self, parser, env={}):
+    ...         pass    
+    >>> class FailBeforePlugin(EnabledPlugin):
+    ...     name = "fail-before"
+    ...            
+    ...     def beforeTest(self, test):
+    ...         raise AttributeError()    
+    >>> class FailAfterPlugin(EnabledPlugin):
+    ...     name = "fail-after"
+    ...            
+    ...     def afterTest(self, test):
+    ...         raise AttributeError()
+
+Running tests with the fail-before plugin enabled will result in all
+tests failing.
+
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> suitepath = os.path.join(support, 'test_spam.py')
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailBeforePlugin()])
+    EE
+    ======================================================================
+    ERROR: test_spam.test_spam
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError
+    <BLANKLINE>
+    ======================================================================
+    ERROR: test_spam.test_eggs
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=2)
+
+But with the fail-after plugin, the entire test run will fail.
+
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailAfterPlugin()])
+    Traceback (most recent call last):
+    ...
+    AttributeError
+
+Likewise, since the next plugin fails in a preparatory method, outside
+of test execution, the entire test run fails when the plugin is used.
+
+    >>> class FailPreparationPlugin(EnabledPlugin):
+    ...     name = "fail-prepare"
+    ...     
+    ...     def prepareTestLoader(self, loader):
+    ...         raise TypeError("That loader is not my type")
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailPreparationPlugin()])
+    Traceback (most recent call last):
+    ...
+    TypeError: That loader is not my type
+
+
+Even AttributeErrors and TypeErrors are not silently suppressed as
+they used to be for some generative plugin methods (issue152).
+
+These methods caught TypeError and AttributeError and did not record
+the exception, before issue152 was fixed: .loadTestsFromDir(),
+.loadTestsFromModule(), .loadTestsFromTestCase(),
+loadTestsFromTestClass, and .makeTest().  Now, the exception is
+caught, but logged as a Failure.
+
+    >>> class FailLoadPlugin(EnabledPlugin):
+    ...     name = "fail-load"
+    ...     
+    ...     def loadTestsFromModule(self, module):
+    ...         # we're testing exception handling behaviour during
+    ...         # iteration, so be a generator function, without
+    ...         # actually yielding any tests
+    ...         if False:
+    ...             yield None
+    ...         raise TypeError("bug in plugin")
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailLoadPlugin()])
+    ..E
+    ======================================================================
+    ERROR: Failure: TypeError (bug in plugin)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    TypeError: bug in plugin
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=1)
+
+
+Also, before issue152 was resolved, .loadTestsFromFile() and
+.loadTestsFromName() didn't catch these errors at all, so the
+following test would crash nose:
+
+    >>> class FailLoadFromNamePlugin(EnabledPlugin):
+    ...     name = "fail-load-from-name"
+    ...     
+    ...     def loadTestsFromName(self, name, module=None, importPath=None):
+    ...         if False:
+    ...             yield None
+    ...         raise TypeError("bug in plugin")
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailLoadFromNamePlugin()])
+    E
+    ======================================================================
+    ERROR: Failure: TypeError (bug in plugin)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    TypeError: bug in plugin
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (errors=1)
diff --git a/doc/doc_tests/test_issue107/support/test_spam$py.class b/doc/doc_tests/test_issue107/support/test_spam$py.class
new file mode 100644 (file)
index 0000000..a06d9e9
Binary files /dev/null and b/doc/doc_tests/test_issue107/support/test_spam$py.class differ
diff --git a/doc/doc_tests/test_issue107/support/test_spam.py b/doc/doc_tests/test_issue107/support/test_spam.py
new file mode 100644 (file)
index 0000000..4c1b8fb
--- /dev/null
@@ -0,0 +1,5 @@
+def test_spam():
+    assert True
+
+def test_eggs():
+    pass
diff --git a/doc/doc_tests/test_issue107/support/test_spam.pyc b/doc/doc_tests/test_issue107/support/test_spam.pyc
new file mode 100644 (file)
index 0000000..e1b44f6
Binary files /dev/null and b/doc/doc_tests/test_issue107/support/test_spam.pyc differ
diff --git a/doc/doc_tests/test_issue119/empty_plugin.rst b/doc/doc_tests/test_issue119/empty_plugin.rst
new file mode 100644 (file)
index 0000000..6194c19
--- /dev/null
@@ -0,0 +1,57 @@
+Minimal plugin
+--------------
+
+Plugins work as long as they implement the minimal interface required
+by nose.plugins.base. They do not have to derive from
+nose.plugins.Plugin.
+
+    >>> class NullPlugin(object):
+    ...
+    ...     enabled = True
+    ...     name = "null"
+    ...     score = 100
+    ...
+    ...     def options(self, parser, env):
+    ...         pass
+    ...
+    ...     def configure(self, options, conf):
+    ...         pass
+    >>> import unittest
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> run(suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[NullPlugin()]) # doctest: +REPORT_NDIFF
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+Plugins can derive from nose.plugins.base and do nothing except set a
+name.
+
+    >>> import os
+    >>> from nose.plugins import Plugin
+    >>> class DerivedNullPlugin(Plugin):
+    ...
+    ...     name = "derived-null"
+
+Enabled plugin that's otherwise empty
+
+    >>> class EnabledDerivedNullPlugin(Plugin):
+    ...
+    ...     enabled = True
+    ...     name = "enabled-derived-null"
+    ...
+    ...     def options(self, parser, env=os.environ):
+    ...         pass
+    ...
+    ...     def configure(self, options, conf):
+    ...         if not self.can_configure:
+    ...             return
+    ...         self.conf = conf
+    >>> run(suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[DerivedNullPlugin(), EnabledDerivedNullPlugin()])
+    ...     # doctest: +REPORT_NDIFF
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
diff --git a/doc/doc_tests/test_issue119/test_zeronine$py.class b/doc/doc_tests/test_issue119/test_zeronine$py.class
new file mode 100644 (file)
index 0000000..8138d8c
Binary files /dev/null and b/doc/doc_tests/test_issue119/test_zeronine$py.class differ
diff --git a/doc/doc_tests/test_issue119/test_zeronine.py b/doc/doc_tests/test_issue119/test_zeronine.py
new file mode 100644 (file)
index 0000000..6a4f450
--- /dev/null
@@ -0,0 +1,26 @@
+import os
+import unittest
+from nose.plugins import Plugin
+from nose.plugins.plugintest import PluginTester
+from nose.plugins.manager import ZeroNinePlugin
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+support = os.path.join(os.path.dirname(os.path.dirname(here)), 'support')
+
+
+class EmptyPlugin(Plugin):
+    pass
+
+class TestEmptyPlugin(PluginTester, unittest.TestCase):
+    activate = '--with-empty'
+    plugins = [ZeroNinePlugin(EmptyPlugin())]
+    suitepath = os.path.join(here, 'empty_plugin.rst')
+
+    def test_empty_zero_nine_does_not_crash(self):
+        print self.output
+        assert "'EmptyPlugin' object has no attribute 'loadTestsFromPath'" \
+            not in self.output
+
+    
+
diff --git a/doc/doc_tests/test_issue119/test_zeronine.pyc b/doc/doc_tests/test_issue119/test_zeronine.pyc
new file mode 100644 (file)
index 0000000..4e1e946
Binary files /dev/null and b/doc/doc_tests/test_issue119/test_zeronine.pyc differ
diff --git a/doc/doc_tests/test_issue142/errorclass_failure.rst b/doc/doc_tests/test_issue142/errorclass_failure.rst
new file mode 100644 (file)
index 0000000..c4ce287
--- /dev/null
@@ -0,0 +1,124 @@
+Failure of Errorclasses
+-----------------------
+
+Errorclasses (skips, deprecations, etc.) define whether or not they
+represent test failures.
+
+    >>> import os
+    >>> import sys
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> from nose.plugins.skip import Skip
+    >>> from nose.plugins.deprecated import Deprecated
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> sys.path.insert(0, support)
+    >>> from errorclass_failure_plugin import Todo, TodoPlugin, \
+    ...                                       NonFailureTodoPlugin
+    >>> todo_test = os.path.join(support, 'errorclass_failing_test.py')
+    >>> misc_test = os.path.join(support, 'errorclass_tests.py')
+
+nose.plugins.errorclass.ErrorClass has an argument ``isfailure``. With a
+true isfailure, when the errorclass' exception is raised by a test,
+tracebacks are printed.
+
+    >>> run(argv=["nosetests", "-v", "--with-todo", todo_test],
+    ...     plugins=[TodoPlugin()])  # doctest: +REPORT_NDIFF
+    errorclass_failing_test.test_todo ... TODO: fix me
+    errorclass_failing_test.test_2 ... ok
+    <BLANKLINE>
+    ======================================================================
+    TODO: errorclass_failing_test.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    FAILED (TODO=1)
+
+
+Also, ``--stop`` stops the test run.
+
+    >>> run(argv=["nosetests", "-v", "--with-todo", "--stop", todo_test],
+    ...     plugins=[TodoPlugin()])  # doctest: +REPORT_NDIFF
+    errorclass_failing_test.test_todo ... TODO: fix me
+    <BLANKLINE>
+    ======================================================================
+    TODO: errorclass_failing_test.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (TODO=1)
+
+
+With a false .isfailure, errorclass exceptions raised by tests are
+treated as "ignored errors."  For ignored errors, tracebacks are not
+printed, and the test run does not stop.
+
+    >>> run(argv=["nosetests", "-v", "--with-non-failure-todo", "--stop",
+    ...           todo_test],
+    ...     plugins=[NonFailureTodoPlugin()])  # doctest: +REPORT_NDIFF
+    errorclass_failing_test.test_todo ... TODO: fix me
+    errorclass_failing_test.test_2 ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK (TODO=1)
+
+
+Exception detail strings of errorclass errors are always printed when
+-v is in effect, regardless of whether the error is ignored.  Note
+that exception detail strings may have more than one line.
+
+    >>> run(argv=["nosetests", "-v", "--with-todo", misc_test],
+    ...     plugins=[TodoPlugin(), Skip(), Deprecated()])
+    ... # doctest: +REPORT_NDIFF
+    errorclass_tests.test_todo ... TODO: fix me
+    errorclass_tests.test_2 ... ok
+    errorclass_tests.test_3 ... SKIP: skipety-skip
+    errorclass_tests.test_4 ... SKIP
+    errorclass_tests.test_5 ... DEPRECATED: spam
+    eggs
+    <BLANKLINE>
+    spam
+    errorclass_tests.test_6 ... DEPRECATED: spam
+    <BLANKLINE>
+    ======================================================================
+    TODO: errorclass_tests.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 6 tests in ...s
+    <BLANKLINE>
+    FAILED (DEPRECATED=2, SKIP=2, TODO=1)
+
+Without -v, the exception detail strings are only displayed if the
+error is not ignored (otherwise, there's no traceback).
+
+    >>> run(argv=["nosetests", "--with-todo", misc_test],
+    ...     plugins=[TodoPlugin(), Skip(), Deprecated()])
+    ... # doctest: +REPORT_NDIFF
+    T.SSDD
+    ======================================================================
+    TODO: errorclass_tests.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 6 tests in ...s
+    <BLANKLINE>
+    FAILED (DEPRECATED=2, SKIP=2, TODO=1)
+
+>>> sys.path.remove(support)
diff --git a/doc/doc_tests/test_issue142/support/errorclass_failing_test$py.class b/doc/doc_tests/test_issue142/support/errorclass_failing_test$py.class
new file mode 100644 (file)
index 0000000..cf2de6b
Binary files /dev/null and b/doc/doc_tests/test_issue142/support/errorclass_failing_test$py.class differ
diff --git a/doc/doc_tests/test_issue142/support/errorclass_failing_test.py b/doc/doc_tests/test_issue142/support/errorclass_failing_test.py
new file mode 100644 (file)
index 0000000..fae3c75
--- /dev/null
@@ -0,0 +1,7 @@
+from errorclass_failure_plugin import Todo
+
+def test_todo():
+    raise Todo("fix me")
+
+def test_2():
+    pass
diff --git a/doc/doc_tests/test_issue142/support/errorclass_failing_test.pyc b/doc/doc_tests/test_issue142/support/errorclass_failing_test.pyc
new file mode 100644 (file)
index 0000000..47adf54
Binary files /dev/null and b/doc/doc_tests/test_issue142/support/errorclass_failing_test.pyc differ
diff --git a/doc/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class b/doc/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class
new file mode 100644 (file)
index 0000000..a77f915
Binary files /dev/null and b/doc/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class differ
diff --git a/doc/doc_tests/test_issue142/support/errorclass_failure_plugin.py b/doc/doc_tests/test_issue142/support/errorclass_failure_plugin.py
new file mode 100644 (file)
index 0000000..927c986
--- /dev/null
@@ -0,0 +1,16 @@
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+class Todo(Exception):
+    pass
+
+class TodoPlugin(ErrorClassPlugin):
+
+    name = "todo"
+
+    todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+class NonFailureTodoPlugin(ErrorClassPlugin):
+
+    name = "non-failure-todo"
+
+    todo = ErrorClass(Todo, label='TODO', isfailure=False)
diff --git a/doc/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc b/doc/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc
new file mode 100644 (file)
index 0000000..7d5ca37
Binary files /dev/null and b/doc/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc differ
diff --git a/doc/doc_tests/test_issue142/support/errorclass_tests$py.class b/doc/doc_tests/test_issue142/support/errorclass_tests$py.class
new file mode 100644 (file)
index 0000000..9993086
Binary files /dev/null and b/doc/doc_tests/test_issue142/support/errorclass_tests$py.class differ
diff --git a/doc/doc_tests/test_issue142/support/errorclass_tests.py b/doc/doc_tests/test_issue142/support/errorclass_tests.py
new file mode 100644 (file)
index 0000000..4981224
--- /dev/null
@@ -0,0 +1,20 @@
+from errorclass_failure_plugin import Todo
+from nose import SkipTest, DeprecatedTest
+
+def test_todo():
+    raise Todo('fix me')
+
+def test_2():
+    pass
+
+def test_3():
+    raise SkipTest('skipety-skip')
+
+def test_4():
+    raise SkipTest()
+
+def test_5():
+    raise DeprecatedTest('spam\neggs\n\nspam')
+
+def test_6():
+    raise DeprecatedTest('spam')
diff --git a/doc/doc_tests/test_issue142/support/errorclass_tests.pyc b/doc/doc_tests/test_issue142/support/errorclass_tests.pyc
new file mode 100644 (file)
index 0000000..9e95e34
Binary files /dev/null and b/doc/doc_tests/test_issue142/support/errorclass_tests.pyc differ
diff --git a/doc/doc_tests/test_issue145/imported_tests.rst b/doc/doc_tests/test_issue145/imported_tests.rst
new file mode 100644 (file)
index 0000000..c4eee78
--- /dev/null
@@ -0,0 +1,117 @@
+Importing Tests
+---------------
+
+When a package imports tests from another package, the tests are
+**completely** relocated into the importing package. This means that the
+fixtures from the source package are **not** run when the tests in the
+importing package are executed.
+
+For example, consider this collection of packages:
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> from nose.util import ls_tree
+    >>> print ls_tree(support) # doctest: +REPORT_NDIFF
+    |-- package1
+    |   |-- __init__.py
+    |   `-- test_module.py
+    |-- package2c
+    |   |-- __init__.py
+    |   `-- test_module.py
+    `-- package2f
+        |-- __init__.py
+        `-- test_module.py
+
+In these packages, the tests are all defined in package1, and are imported
+into package2f and package2c.
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+package1 has fixtures, which we can see by running all of the tests. Note
+below that the test names reflect the modules into which the tests are
+imported, not the source modules.
+
+    >>> argv = [__file__, '-v', support]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package1 setup
+    test (package1.test_module.TestCase) ... ok
+    package1.test_module.TestClass.test_class ... ok
+    package1.test_module.test_function ... ok
+    package2c setup
+    test (package2c.test_module.TestCase) ... ok
+    package2c.test_module.TestClass.test_class ... ok
+    package2f setup
+    package2f.test_module.test_function ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 6 tests in ...s
+    <BLANKLINE>
+    OK
+
+When tests are run in package2f or package2c, only the fixtures from those
+packages are executed.
+
+    >>> argv = [__file__, '-v', os.path.join(support, 'package2f')]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2f setup
+    package2f.test_module.test_function ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+    >>> argv = [__file__, '-v', os.path.join(support, 'package2c')]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2c setup
+    test (package2c.test_module.TestCase) ... ok
+    package2c.test_module.TestClass.test_class ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+This also applies when only the specific tests are selected via the
+command-line.
+
+    >>> argv = [__file__, '-v',
+    ...         os.path.join(support, 'package2c', 'test_module.py') +
+    ...         ':TestClass.test_class']
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2c setup
+    package2c.test_module.TestClass.test_class ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+    >>> argv = [__file__, '-v',
+    ...         os.path.join(support, 'package2c', 'test_module.py') +
+    ...         ':TestCase.test']
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2c setup
+    test (package2c.test_module.TestCase) ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+    >>> argv = [__file__, '-v',
+    ...         os.path.join(support, 'package2f', 'test_module.py') +
+    ...         ':test_function']
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2f setup
+    package2f.test_module.test_function ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
diff --git a/doc/doc_tests/test_issue145/support/package1/__init__$py.class b/doc/doc_tests/test_issue145/support/package1/__init__$py.class
new file mode 100644 (file)
index 0000000..1186513
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package1/__init__$py.class differ
diff --git a/doc/doc_tests/test_issue145/support/package1/__init__.py b/doc/doc_tests/test_issue145/support/package1/__init__.py
new file mode 100644 (file)
index 0000000..c715fdb
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    print 'package1 setup'
diff --git a/doc/doc_tests/test_issue145/support/package1/__init__.pyc b/doc/doc_tests/test_issue145/support/package1/__init__.pyc
new file mode 100644 (file)
index 0000000..8000d71
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package1/__init__.pyc differ
diff --git a/doc/doc_tests/test_issue145/support/package1/test_module$py.class b/doc/doc_tests/test_issue145/support/package1/test_module$py.class
new file mode 100644 (file)
index 0000000..2566ada
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package1/test_module$py.class differ
diff --git a/doc/doc_tests/test_issue145/support/package1/test_module.py b/doc/doc_tests/test_issue145/support/package1/test_module.py
new file mode 100644 (file)
index 0000000..0c5ac78
--- /dev/null
@@ -0,0 +1,12 @@
+import unittest
+
+def test_function():
+    pass
+
+class TestClass:
+    def test_class(self):
+        pass
+
+class TestCase(unittest.TestCase):
+    def test(self):
+        pass
diff --git a/doc/doc_tests/test_issue145/support/package1/test_module.pyc b/doc/doc_tests/test_issue145/support/package1/test_module.pyc
new file mode 100644 (file)
index 0000000..9d033ce
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package1/test_module.pyc differ
diff --git a/doc/doc_tests/test_issue145/support/package2c/__init__$py.class b/doc/doc_tests/test_issue145/support/package2c/__init__$py.class
new file mode 100644 (file)
index 0000000..ddc721a
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2c/__init__$py.class differ
diff --git a/doc/doc_tests/test_issue145/support/package2c/__init__.py b/doc/doc_tests/test_issue145/support/package2c/__init__.py
new file mode 100644 (file)
index 0000000..106401f
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    print 'package2c setup'
diff --git a/doc/doc_tests/test_issue145/support/package2c/__init__.pyc b/doc/doc_tests/test_issue145/support/package2c/__init__.pyc
new file mode 100644 (file)
index 0000000..394ffd8
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2c/__init__.pyc differ
diff --git a/doc/doc_tests/test_issue145/support/package2c/test_module$py.class b/doc/doc_tests/test_issue145/support/package2c/test_module$py.class
new file mode 100644 (file)
index 0000000..4a98b21
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2c/test_module$py.class differ
diff --git a/doc/doc_tests/test_issue145/support/package2c/test_module.py b/doc/doc_tests/test_issue145/support/package2c/test_module.py
new file mode 100644 (file)
index 0000000..6affbf1
--- /dev/null
@@ -0,0 +1 @@
+from package1.test_module import TestClass, TestCase
diff --git a/doc/doc_tests/test_issue145/support/package2c/test_module.pyc b/doc/doc_tests/test_issue145/support/package2c/test_module.pyc
new file mode 100644 (file)
index 0000000..f231327
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2c/test_module.pyc differ
diff --git a/doc/doc_tests/test_issue145/support/package2f/__init__$py.class b/doc/doc_tests/test_issue145/support/package2f/__init__$py.class
new file mode 100644 (file)
index 0000000..704c4c7
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2f/__init__$py.class differ
diff --git a/doc/doc_tests/test_issue145/support/package2f/__init__.py b/doc/doc_tests/test_issue145/support/package2f/__init__.py
new file mode 100644 (file)
index 0000000..fc203eb
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    print 'package2f setup'
diff --git a/doc/doc_tests/test_issue145/support/package2f/__init__.pyc b/doc/doc_tests/test_issue145/support/package2f/__init__.pyc
new file mode 100644 (file)
index 0000000..28350da
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2f/__init__.pyc differ
diff --git a/doc/doc_tests/test_issue145/support/package2f/test_module$py.class b/doc/doc_tests/test_issue145/support/package2f/test_module$py.class
new file mode 100644 (file)
index 0000000..98feca7
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2f/test_module$py.class differ
diff --git a/doc/doc_tests/test_issue145/support/package2f/test_module.py b/doc/doc_tests/test_issue145/support/package2f/test_module.py
new file mode 100644 (file)
index 0000000..e353c62
--- /dev/null
@@ -0,0 +1 @@
+from package1.test_module import test_function
diff --git a/doc/doc_tests/test_issue145/support/package2f/test_module.pyc b/doc/doc_tests/test_issue145/support/package2f/test_module.pyc
new file mode 100644 (file)
index 0000000..a84068d
Binary files /dev/null and b/doc/doc_tests/test_issue145/support/package2f/test_module.pyc differ
diff --git a/doc/doc_tests/test_multiprocess/multiprocess.rst b/doc/doc_tests/test_multiprocess/multiprocess.rst
new file mode 100644 (file)
index 0000000..d463ba0
--- /dev/null
@@ -0,0 +1,269 @@
+Parallel Testing with nose
+--------------------------
+
+.. Note ::
+
+   Use of the multiprocess plugin on python 2.5 or earlier requires
+   the multiprocessing_ module, available from PyPI and at
+   http://code.google.com/p/python-multiprocessing/.
+
+..
+
+Using the `nose.plugins.multiprocess` plugin, you can parallelize a
+test run across a configurable number of worker processes. While this can
+speed up CPU-bound test runs, it is mainly useful for IO-bound tests
+that spend most of their time waiting for data to arrive from someplace
+else and can benefit from parallelization.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process separately,
+and to have enough worker processes that the entire test run takes only as
+long as the slowest test. This ideal is not attainable in all cases, however,
+because many test suites depend on context (class, module or package)
+fixtures.
+
+Some context fixtures are re-entrant -- that is, they can be called many times
+concurrently. Other context fixtures can be shared among tests running in
+different processes. Still others must be run once and only once for a given
+set of tests, and must be in the same process as the tests themselves.
+
+The plugin can't know the difference between these types of context fixtures
+unless you tell it, so the default behavior is to dispatch the entire context
+suite to a worker as a unit. This way, the fixtures are run once, in the same
+process as the tests. (That, of course, is how they are run when the plugin
+is not active: All tests are run in a single process.)
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute multiple times, typically once per test, and concurrently.
+
+For example, a module that contains re-entrant fixtures might look like::
+
+  _multiprocess_can_split_ = True
+
+  def setup():
+      ...
+
+A class might look like::
+
+  class TestClass:
+      _multiprocess_can_split_ = True
+
+      @classmethod
+      def setup_class(cls):
+          ...
+
+Alternatively, if a context's fixtures may only be run once, or may not run
+concurrently, but *may* be shared by tests running in different processes
+-- for instance a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. Fixtures for contexts so marked will execute in the primary nose
+process, and tests in those contexts will be individually dispatched to run in
+parallel.
+
+A module with shareable fixtures might look like::
+
+  _multiprocess_shared_ = True
+
+  def setup():
+      ...
+
+A class might look like::
+
+  class TestClass:
+      _multiprocess_shared_ = True
+
+      @classmethod
+      def setup_class(cls):
+          ...
+
+These options are mutually exclusive: you can't mark a context as both
+splittable and shareable.
+
+Example
+~~~~~~~
+
+Consider three versions of the same test suite. One
+is marked ``_multiprocess_shared_``, another ``_multiprocess_can_split_``,
+and the third is unmarked. They all define the same fixtures:
+
+    called = []
+
+    def setup():
+        print "setup called"
+        called.append('setup')
+
+    def teardown():
+        print "teardown called"
+        called.append('teardown')
+
+And each has two tests that just test that ``setup()`` has been called
+once and only once.
+
+When run without the multiprocess plugin, fixtures for the shared,
+can-split and not-shared test suites execute at the same times, and
+all tests pass.
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> test_not_shared = os.path.join(support, 'test_not_shared.py')
+    >>> test_shared = os.path.join(support, 'test_shared.py')
+    >>> test_can_split = os.path.join(support, 'test_can_split.py')
+
+The module with shared fixtures passes.
+
+    >>> run(argv=['nosetests', '-v', test_shared]) #doctest: +REPORT_NDIFF
+    setup called
+    test_shared.TestMe.test_one ... ok
+    test_shared.test_a ... ok
+    test_shared.test_b ... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+As does the module with no fixture annotations.
+
+    >>> run(argv=['nosetests', '-v', test_not_shared]) #doctest: +REPORT_NDIFF
+    setup called
+    test_not_shared.TestMe.test_one ... ok
+    test_not_shared.test_a ... ok
+    test_not_shared.test_b ... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+And the module that marks its fixtures as re-entrant.
+
+    >>> run(argv=['nosetests', '-v', test_can_split]) #doctest: +REPORT_NDIFF
+    setup called
+    test_can_split.TestMe.test_one ... ok
+    test_can_split.test_a ... ok
+    test_can_split.test_b ... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+However, when run with the ``--processes=2`` switch, each test module
+behaves differently.
+
+    >>> from nose.plugins.multiprocess import MultiProcess
+
+The module marked ``_multiprocess_shared_`` executes correctly, although as with
+any use of the multiprocess plugin, the order in which the tests execute is
+indeterminate.
+
+First we have to reset all of the test modules.
+
+    >>> import sys
+    >>> sys.modules['test_not_shared'].called[:] = []
+    >>> sys.modules['test_can_split'].called[:] = []
+
+Then we can run the tests again with the multiprocess plugin active.
+    
+    >>> run(argv=['nosetests', '-v', '--processes=2', test_shared],
+    ...     plugins=[MultiProcess()]) #doctest: +ELLIPSIS
+    setup called
+    test_shared.... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+As does the one not marked -- however in this case, ``--processes=2``
+will do *nothing at all*: since the tests are in a module with
+unmarked fixtures, the entire test module will be dispatched to a
+single runner process.
+
+However, the module marked ``_multiprocess_can_split_`` will fail, since
+the fixtures *are not reentrant*. A module such as this *must not* be
+marked ``_multiprocess_can_split_``, or tests will fail in one or more
+runner processes as fixtures are re-executed.
+
+We have to reset all of the test modules again.
+
+    >>> import sys
+    >>> sys.modules['test_not_shared'].called[:] = []
+    >>> sys.modules['test_can_split'].called[:] = []
+
+Then we can run again and see the failures.
+
+    >>> run(argv=['nosetests', '-v', '--processes=2', test_can_split],
+    ...     plugins=[MultiProcess()]) #doctest: +ELLIPSIS
+    setup called
+    teardown called
+    test_can_split....
+    ...
+    FAILED (failures=...)
+
+Other differences in test running
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The main difference between using the multiprocess plugin and not doing so
+is obviously that tests run concurrently under multiprocess. However, there
+are a few other differences that may impact your test suite:
+
+* More tests may be found
+
+  Because tests are dispatched to worker processes by name, a worker
+  process may find and run tests in a module that would not be found during a
+  normal test run. For instance, if a non-test module contains a test-like
+  function, that function would be discovered as a test in a worker process
+  if the entire module is dispatched to the worker. This is because worker
+  processes load tests in *directed* mode -- the same way that nose loads
+  tests when you explicitly name a module -- rather than in *discovered* mode,
+  the mode nose uses when looking for tests in a directory.
+
+* Out-of-order output
+
+  Test results are collected by workers and returned to the master process for
+  output. Since different processes may complete their tests at different
+  times, test result output order is not determinate.
+
+* Plugin interaction warning
+
+  The multiprocess plugin does not work well with other plugins that expect to
+  wrap or gain control of the test-running process. Examples from nose's 
+  builtin plugins include coverage and profiling: a test run using
+  both multiprocess and either of those is likely to fail in some
+  confusing and spectacular way.
+
+* Python 2.6 warning
+
+  This is unlikely to impact you unless you are writing tests for nose itself,
+  but be aware that under python 2.6, the multiprocess plugin is not
+  re-entrant. For example, when running nose with the plugin active, you can't
+  use subprocess to launch another copy of nose that also uses the
+  multiprocess plugin. This is why this test is skipped under python 2.6 when
+  run with the ``--processes`` switch.
diff --git a/doc/doc_tests/test_multiprocess/multiprocess_fixtures$py.class b/doc/doc_tests/test_multiprocess/multiprocess_fixtures$py.class
new file mode 100644 (file)
index 0000000..c373f17
Binary files /dev/null and b/doc/doc_tests/test_multiprocess/multiprocess_fixtures$py.class differ
diff --git a/doc/doc_tests/test_multiprocess/multiprocess_fixtures.py b/doc/doc_tests/test_multiprocess/multiprocess_fixtures.py
new file mode 100644 (file)
index 0000000..8b4c3af
--- /dev/null
@@ -0,0 +1,16 @@
+from nose.plugins.skip import SkipTest
+from nose.plugins.multiprocess import MultiProcess
+
+_multiprocess_can_split_ = True
+
+def setup_module():
+    try:
+        import multiprocessing
+        if 'active' in MultiProcess.status:
+            raise SkipTest("Multiprocess plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("multiprocessing module not available")
+
+
+
diff --git a/doc/doc_tests/test_multiprocess/multiprocess_fixtures.pyc b/doc/doc_tests/test_multiprocess/multiprocess_fixtures.pyc
new file mode 100644 (file)
index 0000000..a617865
Binary files /dev/null and b/doc/doc_tests/test_multiprocess/multiprocess_fixtures.pyc differ
diff --git a/doc/doc_tests/test_multiprocess/support/test_can_split.py b/doc/doc_tests/test_multiprocess/support/test_can_split.py
new file mode 100644 (file)
index 0000000..a7ae6e7
--- /dev/null
@@ -0,0 +1,30 @@
+import sys
+called = []
+
+_multiprocess_can_split_ = 1
+
+def setup():
+    print >> sys.stderr, "setup called"
+    called.append('setup')
+
+
+def teardown():
+    print >> sys.stderr, "teardown called"
+    called.append('teardown')
+
+
+def test_a():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+def test_b():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+class TestMe:
+    def setup_class(cls):
+        cls._setup = True
+    setup_class = classmethod(setup_class)
+
+    def test_one(self):
+        assert self._setup, "Class was not set up"
diff --git a/doc/doc_tests/test_multiprocess/support/test_can_split.pyc b/doc/doc_tests/test_multiprocess/support/test_can_split.pyc
new file mode 100644 (file)
index 0000000..311493e
Binary files /dev/null and b/doc/doc_tests/test_multiprocess/support/test_can_split.pyc differ
diff --git a/doc/doc_tests/test_multiprocess/support/test_not_shared.py b/doc/doc_tests/test_multiprocess/support/test_not_shared.py
new file mode 100644 (file)
index 0000000..8542109
--- /dev/null
@@ -0,0 +1,30 @@
+import sys
+called = []
+
+_multiprocess_ = 1
+
+def setup():
+    print >> sys.stderr, "setup called"
+    called.append('setup')
+
+
+def teardown():
+    print >> sys.stderr, "teardown called"
+    called.append('teardown')
+
+
+def test_a():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+def test_b():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+class TestMe:
+    def setup_class(cls):
+        cls._setup = True
+    setup_class = classmethod(setup_class)
+
+    def test_one(self):
+        assert self._setup, "Class was not set up"
diff --git a/doc/doc_tests/test_multiprocess/support/test_not_shared.pyc b/doc/doc_tests/test_multiprocess/support/test_not_shared.pyc
new file mode 100644 (file)
index 0000000..5d8a9ee
Binary files /dev/null and b/doc/doc_tests/test_multiprocess/support/test_not_shared.pyc differ
diff --git a/doc/doc_tests/test_multiprocess/support/test_shared.py b/doc/doc_tests/test_multiprocess/support/test_shared.py
new file mode 100644 (file)
index 0000000..d8617f8
--- /dev/null
@@ -0,0 +1,49 @@
+import os
+import sys
+
+here = os.path.dirname(__file__)
+flag = os.path.join(here, 'shared_flag')
+
+_multiprocess_shared_ = 1
+
+def _log(val):
+    ff = open(flag, 'a+')
+    ff.write(val)
+    ff.write("\n")
+    ff.close()
+
+
+def _clear():
+    if os.path.isfile(flag):
+        os.unlink(flag)
+
+        
+def logged():
+    return [line for line in open(flag, 'r')]
+
+
+def setup():
+    print >> sys.stderr, "setup called"
+    _log('setup')
+
+
+def teardown():
+    print >> sys.stderr, "teardown called"
+    _clear()
+
+    
+def test_a():
+    assert len(logged()) == 1, "len(%s) !=1" % called
+
+
+def test_b():
+    assert len(logged()) == 1, "len(%s) !=1" % called
+
+
+class TestMe:
+    def setup_class(cls):
+        cls._setup = True
+    setup_class = classmethod(setup_class)
+
+    def test_one(self):
+        assert self._setup, "Class was not set up"
diff --git a/doc/doc_tests/test_multiprocess/support/test_shared.pyc b/doc/doc_tests/test_multiprocess/support/test_shared.pyc
new file mode 100644 (file)
index 0000000..d48dca8
Binary files /dev/null and b/doc/doc_tests/test_multiprocess/support/test_shared.pyc differ
diff --git a/doc/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst b/doc/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst
new file mode 100644 (file)
index 0000000..9513fdf
--- /dev/null
@@ -0,0 +1,89 @@
+Restricted Plugin Managers
+--------------------------
+
+In some cases, such as running under the ``python setup.py test`` command,
+nose is not able to use all available plugins. In those cases, a
+`nose.plugins.manager.RestrictedPluginManager` is used to exclude plugins that
+implement API methods that nose is unable to call.
+
+Support files for this test are in the support directory.
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+
+For this test, we'll use a simple plugin that implements the ``startTest``
+method.
+
+    >>> from nose.plugins.base import Plugin
+    >>> from nose.plugins.manager import RestrictedPluginManager
+    >>> class StartPlugin(Plugin):
+    ...     def startTest(self, test):
+    ...         print "started %s" % test
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+When run with a normal plugin manager, the plugin executes.
+
+    >>> argv = ['plugintest', '-v', '--with-startplugin', support]
+    >>> run(argv=argv, plugins=[StartPlugin()]) # doctest: +REPORT_NDIFF
+    started test.test
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+However, when run with a restricted plugin manager configured to exclude
+plugins implementing `startTest`, an exception is raised and nose exits.
+
+    >>> restricted = RestrictedPluginManager(
+    ...     plugins=[StartPlugin()], exclude=('startTest',), load=False)
+    >>> run(argv=argv, plugins=restricted) #doctest: +REPORT_NDIFF +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    SystemExit: ...
+
+Errors are only raised when options defined by excluded plugins are used.
+
+    >>> argv = ['plugintest', '-v', support]
+    >>> run(argv=argv, plugins=restricted) # doctest: +REPORT_NDIFF
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+When a disabled option appears in a configuration file, instead of on the
+command line, a warning is raised instead of an exception.
+
+    >>> argv = ['plugintest', '-v', '-c', os.path.join(support, 'start.cfg'),
+    ...         support]
+    >>> run(argv=argv, plugins=restricted) # doctest: +ELLIPSIS
+    RuntimeWarning: Option 'with-startplugin' in config file '...start.cfg' ignored: excluded by runtime environment
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+However, if an option appears in a configuration file that is not recognized
+either as an option defined by nose, or by an active or excluded plugin, an
+error is raised.
+
+    >>> argv = ['plugintest', '-v', '-c', os.path.join(support, 'bad.cfg'),
+    ...         support]
+    >>> run(argv=argv, plugins=restricted) # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ConfigError: Error reading config file '...bad.cfg': no such option 'with-meltedcheese'
diff --git a/doc/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst.py3.patch b/doc/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst.py3.patch
new file mode 100644 (file)
index 0000000..51a09b4
--- /dev/null
@@ -0,0 +1,9 @@
+--- restricted_plugin_options.rst.orig 2010-08-31 10:57:04.000000000 -0700
++++ restricted_plugin_options.rst      2010-08-31 10:57:51.000000000 -0700
+@@ -86,5 +86,5 @@
+     >>> run(argv=argv, plugins=restricted) # doctest: +ELLIPSIS
+     Traceback (most recent call last):
+     ...
+-    ConfigError: Error reading config file '...bad.cfg': no such option 'with-meltedcheese'
++    nose.config.ConfigError: Error reading config file '...bad.cfg': no such option 'with-meltedcheese'
diff --git a/doc/doc_tests/test_restricted_plugin_options/support/bad.cfg b/doc/doc_tests/test_restricted_plugin_options/support/bad.cfg
new file mode 100644 (file)
index 0000000..c050ec4
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+with-meltedcheese=1
\ No newline at end of file
diff --git a/doc/doc_tests/test_restricted_plugin_options/support/start.cfg b/doc/doc_tests/test_restricted_plugin_options/support/start.cfg
new file mode 100644 (file)
index 0000000..ea1e289
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+with-startplugin=1
\ No newline at end of file
diff --git a/doc/doc_tests/test_restricted_plugin_options/support/test$py.class b/doc/doc_tests/test_restricted_plugin_options/support/test$py.class
new file mode 100644 (file)
index 0000000..2336360
Binary files /dev/null and b/doc/doc_tests/test_restricted_plugin_options/support/test$py.class differ
diff --git a/doc/doc_tests/test_restricted_plugin_options/support/test.py b/doc/doc_tests/test_restricted_plugin_options/support/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/doc/doc_tests/test_restricted_plugin_options/support/test.pyc b/doc/doc_tests/test_restricted_plugin_options/support/test.pyc
new file mode 100644 (file)
index 0000000..46c102a
Binary files /dev/null and b/doc/doc_tests/test_restricted_plugin_options/support/test.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/selector_plugin.rst b/doc/doc_tests/test_selector_plugin/selector_plugin.rst
new file mode 100644 (file)
index 0000000..f5f7913
--- /dev/null
@@ -0,0 +1,119 @@
+Using a Custom Selector
+-----------------------
+
+By default, nose uses a `nose.selector.Selector` instance to decide
+what is and is not a test. The default selector is fairly simple: for
+the most part, if an object's name matches the ``testMatch`` regular
+expression defined in the active `nose.config.Config` instance, the
+object is selected as a test. 
+
+This behavior is fine for new projects, but may be undesirable for
+older projects with a different test naming scheme. Fortunately, you
+can easily override this behavior by providing a custom selector using
+a plugin.
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+
+In this example, the project to be tested consists of a module and
+package and associated tests, laid out like this::
+
+    >>> from nose.util import ls_tree
+    >>> print ls_tree(support)
+    |-- mymodule.py
+    |-- mypackage
+    |   |-- __init__.py
+    |   |-- strings.py
+    |   `-- math
+    |       |-- __init__.py
+    |       `-- basic.py
+    `-- tests
+        |-- testlib.py
+        |-- math
+        |   `-- basic.py
+        |-- mymodule
+        |   `-- my_function.py
+        `-- strings
+            `-- cat.py
+
+Because the test modules do not include ``test`` in their names,
+nose's default selector is unable to discover this project's tests.
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> argv = [__file__, '-v', support]
+    >>> run(argv=argv)
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+The tests for the example project follow a few basic conventions:
+
+* They are all located under the tests/ directory.
+* Test modules are organized into groups under directories named for
+  the module or package they test.
+* testlib is *not* a test module, but it must be importable by the
+  test modules.
+* Test modules contain unittest.TestCase classes that are tests, and
+  may contain other functions or classes that are NOT tests, no matter
+  how they are named.
+
+We can codify those conventions in a selector class.
+
+    >>> from nose.selector import Selector
+    >>> import unittest
+    >>> class MySelector(Selector):
+    ...     def wantDirectory(self, dirname):
+    ...         # we want the tests directory and all directories
+    ...         # beneath it, and no others
+    ...         parts = dirname.split(os.path.sep)
+    ...         return 'tests' in parts
+    ...     def wantFile(self, filename):
+    ...         # we want python modules under tests/, except testlib
+    ...         parts = filename.split(os.path.sep)
+    ...         base, ext = os.path.splitext(parts[-1])
+    ...         return 'tests' in parts and ext == '.py' and base != 'testlib'
+    ...     def wantModule(self, module):
+    ...         # wantDirectory and wantFile above will ensure that
+    ...         # we never see an unwanted module
+    ...         return True
+    ...     def wantFunction(self, function):
+    ...         # never collect functions
+    ...         return False
+    ...     def wantClass(self, cls):
+    ...         # only collect TestCase subclasses
+    ...         return issubclass(cls, unittest.TestCase)
+
+To use our selector class, we need a plugin that can inject it into
+the test loader.
+
+    >>> from nose.plugins import Plugin
+    >>> class UseMySelector(Plugin):
+    ...     enabled = True
+    ...     def configure(self, options, conf):
+    ...         pass # always on
+    ...     def prepareTestLoader(self, loader):
+    ...         loader.selector = MySelector(loader.config)
+
+Now we can execute a test run using the custom selector, and the
+project's tests will be collected.
+
+    >>> run(argv=argv, plugins=[UseMySelector()])
+    test_add (basic.TestBasicMath) ... ok
+    test_sub (basic.TestBasicMath) ... ok
+    test_tuple_groups (my_function.MyFunction) ... ok
+    test_cat (cat.StringsCat) ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 4 tests in ...s
+    <BLANKLINE>
+    OK
diff --git a/doc/doc_tests/test_selector_plugin/support/mymodule$py.class b/doc/doc_tests/test_selector_plugin/support/mymodule$py.class
new file mode 100644 (file)
index 0000000..86c6620
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mymodule$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mymodule.py b/doc/doc_tests/test_selector_plugin/support/mymodule.py
new file mode 100644 (file)
index 0000000..66b3c16
--- /dev/null
@@ -0,0 +1,2 @@
+def my_function(a, b, c):
+    return (a, (b, c))
diff --git a/doc/doc_tests/test_selector_plugin/support/mymodule.pyc b/doc/doc_tests/test_selector_plugin/support/mymodule.pyc
new file mode 100644 (file)
index 0000000..6320e75
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mymodule.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class b/doc/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class
new file mode 100644 (file)
index 0000000..34dc2b3
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/__init__.py b/doc/doc_tests/test_selector_plugin/support/mypackage/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc b/doc/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc
new file mode 100644 (file)
index 0000000..b8730b7
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class b/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class
new file mode 100644 (file)
index 0000000..f40b427
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.py b/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.py
new file mode 100644 (file)
index 0000000..04e0659
--- /dev/null
@@ -0,0 +1 @@
+from mypackage.math.basic import *
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc b/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc
new file mode 100644 (file)
index 0000000..6eaebcd
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class b/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class
new file mode 100644 (file)
index 0000000..dca938c
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.py b/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.py
new file mode 100644 (file)
index 0000000..6cddd28
--- /dev/null
@@ -0,0 +1,5 @@
+def add(a, b):
+    return a + b
+
+def sub(a, b):
+    return a - b
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc b/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc
new file mode 100644 (file)
index 0000000..9f434d6
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/strings$py.class b/doc/doc_tests/test_selector_plugin/support/mypackage/strings$py.class
new file mode 100644 (file)
index 0000000..ba232ce
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/strings$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/strings.py b/doc/doc_tests/test_selector_plugin/support/mypackage/strings.py
new file mode 100644 (file)
index 0000000..8ffc4cc
--- /dev/null
@@ -0,0 +1,2 @@
+def cat(a, b):
+    return "%s%s" % (a, b)
diff --git a/doc/doc_tests/test_selector_plugin/support/mypackage/strings.pyc b/doc/doc_tests/test_selector_plugin/support/mypackage/strings.pyc
new file mode 100644 (file)
index 0000000..e18782f
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/mypackage/strings.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/math/basic$py.class b/doc/doc_tests/test_selector_plugin/support/tests/math/basic$py.class
new file mode 100644 (file)
index 0000000..826ddf8
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/math/basic$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/math/basic.py b/doc/doc_tests/test_selector_plugin/support/tests/math/basic.py
new file mode 100644 (file)
index 0000000..7639ddc
--- /dev/null
@@ -0,0 +1,17 @@
+import testlib
+from mypackage import math
+
+
+class TestBasicMath(testlib.Base):
+
+    def test_add(self):
+        self.assertEqual(math.add(1, 2), 3)
+
+    def test_sub(self):
+        self.assertEqual(math.sub(3, 1), 2)
+
+
+class TestHelperClass:
+    def __init__(self):
+        raise Exception(
+            "This test helper class should not be collected")
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/math/basic.pyc b/doc/doc_tests/test_selector_plugin/support/tests/math/basic.pyc
new file mode 100644 (file)
index 0000000..d29ab0b
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/math/basic.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class b/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class
new file mode 100644 (file)
index 0000000..fee09f7
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.py b/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.py
new file mode 100644 (file)
index 0000000..85808c9
--- /dev/null
@@ -0,0 +1,7 @@
+import mymodule
+import testlib
+
+class MyFunction(testlib.Base):
+
+    def test_tuple_groups(self):
+        self.assertEqual(mymodule.my_function(1, 2, 3), (1, (2, 3)))
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc b/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc
new file mode 100644 (file)
index 0000000..cb6a0b1
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class b/doc/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class
new file mode 100644 (file)
index 0000000..f01b978
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/strings/cat.py b/doc/doc_tests/test_selector_plugin/support/tests/strings/cat.py
new file mode 100644 (file)
index 0000000..3b410e8
--- /dev/null
@@ -0,0 +1,12 @@
+import testlib
+from mypackage import strings
+
+class StringsCat(testlib.Base):
+
+    def test_cat(self):
+        self.assertEqual(strings.cat('one', 'two'), 'onetwo')
+
+
+def test_helper_function():
+    raise Exception(
+        "This test helper function should not be collected")
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc b/doc/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc
new file mode 100644 (file)
index 0000000..80be4e7
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/testlib$py.class b/doc/doc_tests/test_selector_plugin/support/tests/testlib$py.class
new file mode 100644 (file)
index 0000000..fe22fe2
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/testlib$py.class differ
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/testlib.py b/doc/doc_tests/test_selector_plugin/support/tests/testlib.py
new file mode 100644 (file)
index 0000000..92c4f96
--- /dev/null
@@ -0,0 +1,6 @@
+import unittest
+
+class Base(unittest.TestCase):
+    """Use this base class for all tests.
+    """
+    pass
diff --git a/doc/doc_tests/test_selector_plugin/support/tests/testlib.pyc b/doc/doc_tests/test_selector_plugin/support/tests/testlib.pyc
new file mode 100644 (file)
index 0000000..99f5b8e
Binary files /dev/null and b/doc/doc_tests/test_selector_plugin/support/tests/testlib.pyc differ
diff --git a/doc/doc_tests/test_xunit_plugin/support/nosetests.xml b/doc/doc_tests/test_xunit_plugin/support/nosetests.xml
new file mode 100644 (file)
index 0000000..cb1ad4f
--- /dev/null
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?><testsuite name="nosetests" tests="4" errors="1" failures="1" skip="1"><testcase classname="test_skip" name="test_ok" time="0.002" /><testcase classname="test_skip" name="test_err" time="0.002"><error type="exceptions.Exception" message="oh no"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/nose/case.py", line 197, in runTest
+    self.test(*self.arg)
+  File "/private/tmp/nose_release_1.1.2/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py", line 7, in test_err
+    raise Exception("oh no")
+Exception: oh no
+]]></error></testcase><testcase classname="test_skip" name="test_fail" time="0.003"><failure type="exceptions.AssertionError" message="bye"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/nose/case.py", line 197, in runTest
+    self.test(*self.arg)
+  File "/private/tmp/nose_release_1.1.2/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py", line 10, in test_fail
+    assert False, "bye"
+AssertionError: bye
+]]></failure></testcase><testcase classname="test_skip" name="test_skip" time="0.002"><skipped type="nose.plugins.skip.SkipTest" message="not me"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/nose/case.py", line 197, in runTest
+    self.test(*self.arg)
+  File "/private/tmp/nose_release_1.1.2/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py", line 13, in test_skip
+    raise SkipTest("not me")
+SkipTest: not me
+]]></skipped></testcase></testsuite>
\ No newline at end of file
diff --git a/doc/doc_tests/test_xunit_plugin/support/test_skip$py.class b/doc/doc_tests/test_xunit_plugin/support/test_skip$py.class
new file mode 100644 (file)
index 0000000..7dc7e85
Binary files /dev/null and b/doc/doc_tests/test_xunit_plugin/support/test_skip$py.class differ
diff --git a/doc/doc_tests/test_xunit_plugin/support/test_skip.py b/doc/doc_tests/test_xunit_plugin/support/test_skip.py
new file mode 100644 (file)
index 0000000..cb26c41
--- /dev/null
@@ -0,0 +1,13 @@
+from nose.exc import SkipTest
+
+def test_ok():
+    pass
+
+def test_err():
+    raise Exception("oh no")
+
+def test_fail():
+    assert False, "bye"
+
+def test_skip():
+    raise SkipTest("not me")
diff --git a/doc/doc_tests/test_xunit_plugin/support/test_skip.pyc b/doc/doc_tests/test_xunit_plugin/support/test_skip.pyc
new file mode 100644 (file)
index 0000000..2cf730b
Binary files /dev/null and b/doc/doc_tests/test_xunit_plugin/support/test_skip.pyc differ
diff --git a/doc/doc_tests/test_xunit_plugin/test_skips.rst b/doc/doc_tests/test_xunit_plugin/test_skips.rst
new file mode 100644 (file)
index 0000000..c0c3fbc
--- /dev/null
@@ -0,0 +1,40 @@
+XUnit output supports skips
+---------------------------
+
+>>> import os
+>>> from nose.plugins.xunit import Xunit
+>>> from nose.plugins.skip import SkipTest, Skip
+>>> support = os.path.join(os.path.dirname(__file__), 'support')
+>>> outfile = os.path.join(support, 'nosetests.xml')
+>>> from nose.plugins.plugintest import run_buffered as run
+>>> argv = [__file__, '-v', '--with-xunit', support,
+...         '--xunit-file=%s' % outfile]
+>>> run(argv=argv, plugins=[Xunit(), Skip()]) # doctest: +ELLIPSIS
+test_skip.test_ok ... ok
+test_skip.test_err ... ERROR
+test_skip.test_fail ... FAIL
+test_skip.test_skip ... SKIP: not me
+<BLANKLINE>
+======================================================================
+ERROR: test_skip.test_err
+----------------------------------------------------------------------
+Traceback (most recent call last):
+...
+Exception: oh no
+<BLANKLINE>
+======================================================================
+FAIL: test_skip.test_fail
+----------------------------------------------------------------------
+Traceback (most recent call last):
+...
+AssertionError: bye
+<BLANKLINE>
+----------------------------------------------------------------------
+XML: ...nosetests.xml
+----------------------------------------------------------------------
+Ran 4 tests in ...s
+<BLANKLINE>
+FAILED (SKIP=1, errors=1, failures=1)
+
+>>> open(outfile, 'r').read() # doctest: +ELLIPSIS
+'<?xml version="1.0" encoding="UTF-8"?><testsuite name="nosetests" tests="4" errors="1" failures="1" skip="1"><testcase classname="test_skip" name="test_ok" time="..." /><testcase classname="test_skip" name="test_err" time="..."><error type="...Exception" message="oh no">...</error></testcase><testcase classname="test_skip" name="test_fail" time="..."><failure type="...AssertionError" message="bye">...</failure></testcase><testcase classname="test_skip" name="test_skip" time="..."><skipped type="...SkipTest" message="not me">...</skipped></testcase></testsuite>'
diff --git a/doc/docstring.py b/doc/docstring.py
new file mode 100644 (file)
index 0000000..5652bd2
--- /dev/null
@@ -0,0 +1,25 @@
+from docutils import nodes
+from docutils.statemachine import ViewList
+from nose.util import resolve_name
+
+
+def docstring_directive(dirname, arguments, options, content, lineno,
+                        content_offset, block_text, state, state_machine):
+    obj_name = arguments[0]
+    obj = resolve_name(obj_name)
+    rst = ViewList()
+    rst.append(obj.__doc__, '<docstring>')
+    print "CALLED", obj_name, obj, rst
+    node = nodes.section()
+    surrounding_title_styles = state.memo.title_styles
+    surrounding_section_level = state.memo.section_level
+    state.memo.title_styles = []
+    state.memo.section_level = 0
+    state.nested_parse(rst, 0, node, match_titles=1)
+    state.memo.title_styles = surrounding_title_styles
+    state.memo.section_level = surrounding_section_level
+    return node.children
+
+
+def setup(app):
+    app.add_directive('docstring', docstring_directive, 1, (1, 0, 1))
diff --git a/doc/finding_tests.rst b/doc/finding_tests.rst
new file mode 100644 (file)
index 0000000..5f9cb74
--- /dev/null
@@ -0,0 +1,32 @@
+Finding and running tests
+-------------------------
+
+nose, by default, follows a few simple rules for test discovery.
+
+* If it looks like a test, it's a test. Names of directories, modules,
+  classes and functions are compared against the testMatch regular
+  expression, and those that match are considered tests. Any class that is a
+  `unittest.TestCase` subclass is also collected, so long as it is inside of a
+  module that looks like a test.
+   
+* Directories that don't look like tests and aren't packages are not
+  inspected.
+
+* Packages are always inspected, but they are only collected if they look
+  like tests. This means that you can include your tests inside of your
+  packages (somepackage/tests) and nose will collect the tests without
+  running package code inappropriately.
+
+* When a project appears to have library and test code organized into
+  separate directories, library directories are examined first.
+
+* When nose imports a module, it adds that module's directory to sys.path;
+  when the module is inside of a package, like package.module, it will be
+  loaded as package.module and the directory of *package* will be added to
+  sys.path.
+
+* If an object defines a __test__ attribute that does not evaluate to
+  True, that object will not be collected, nor will any objects it
+  contains.
+
+Be aware that plugins and command line options can change any of those rules.
\ No newline at end of file
diff --git a/doc/further_reading.rst b/doc/further_reading.rst
new file mode 100644 (file)
index 0000000..4a93553
--- /dev/null
@@ -0,0 +1,34 @@
+Further reading
+===============
+
+.. toctree ::
+   :maxdepth: 2
+
+   doc_tests/test_addplugins/test_addplugins.rst
+   doc_tests/test_coverage_html/coverage_html.rst
+   doc_tests/test_doctest_fixtures/doctest_fixtures.rst
+   doc_tests/test_init_plugin/init_plugin.rst
+   doc_tests/test_issue089/unwanted_package.rst
+   doc_tests/test_issue097/plugintest_environment.rst
+   doc_tests/test_issue107/plugin_exceptions.rst
+   doc_tests/test_issue119/empty_plugin.rst
+   doc_tests/test_issue142/errorclass_failure.rst
+   doc_tests/test_issue145/imported_tests.rst
+   doc_tests/test_multiprocess/multiprocess.rst
+   doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst
+   doc_tests/test_selector_plugin/selector_plugin.rst
+   doc_tests/test_allmodules/test_allmodules.rst
+   more_info
+
+Articles, etc
+-------------
+
+* `An Extended Introduction to the nose Unit Testing Framework`_:
+  Titus Brown's excellent article provides a great overview of
+  nose and its uses.
+* `My blog`_
+* `Tweets`_
+
+.. _`An Extended Introduction to the nose Unit Testing Framework` : http://ivory.idyll.org/articles/nose-intro.html
+.. _`My blog` : http://somethingaboutorange.com/mrl/
+.. _`Tweets` : http://twitter.com/jpellerin
\ No newline at end of file
diff --git a/doc/index.html b/doc/index.html
new file mode 100644 (file)
index 0000000..cd80895
--- /dev/null
@@ -0,0 +1,8 @@
+<html>
+  <head><title>Redirecting to nose 0.11 docs</title>
+    <meta http-equiv="refresh" content="0; ./1.0.0/"/>
+  </head>
+  <body>
+    <p>Redirecting to <a href="./1.0.0/">nose 1.0 docs</a></p>
+  </body>
+</html>
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644 (file)
index 0000000..eeb1805
--- /dev/null
@@ -0,0 +1,74 @@
+.. nose documentation master file, created by sphinx-quickstart on Thu Mar 26 16:49:00 2009.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+
+Installation and quick start
+============================
+
+*On most UNIX-like systems, you'll probably need to run these commands as root
+or using sudo.*
+
+Install nose using setuptools/distribute::
+
+  easy_install nose
+
+Or pip::
+
+  pip install nose
+
+Or, if you don't have setuptools/distribute installed, use the download
+link at right to download the source package, and install it in the
+normal fashion: Ungzip and untar the source package, cd to the new
+directory, and::
+
+  python setup.py install
+
+However, **please note** that without setuptools/distribute installed,
+you will not be able to use third-party nose plugins.
+
+This will install the nose libraries, as well as the :doc:`nosetests <usage>`
+script, which you can use to automatically discover and run tests.
+
+Now you can run tests for your project::
+
+  cd path/to/project
+  nosetests
+
+You should see output something like this::
+
+  ..................................
+  ----------------------------------------------------------------------
+  Ran 34 tests in 1.440s
+
+  OK
+
+Indicating that nose found and ran your tests.
+
+For help with nosetests' many command-line options, try::
+
+  nosetests -h
+
+or visit the :doc:`usage documentation <usage>`.
+
+
+Python3
+=======
+
+nose supports python3. Building from source on python3 requires
+`distribute <http://packages.python.org/distribute/>`_. If you don't
+have distribute installed, ``python3 setup.py install`` will install
+it via distribute's bootstrap script.
+
+.. warning ::
+
+   nose itself supports python 3, but many 3rd-party plugins do not!
+
+
+.. toctree::
+   :hidden:
+
+   testing
+   developing
+   news
+   further_reading
diff --git a/doc/man.rst b/doc/man.rst
new file mode 100644 (file)
index 0000000..6318b24
--- /dev/null
@@ -0,0 +1,24 @@
+===========
+ nosetests
+===========
+
+------------------------
+nicer testing for python
+------------------------
+
+:Author: jpellerin+nose@gmail.com
+:Date:   2009-04-23
+:Copyright: LGPL
+:Version: 0.11
+:Manual section: 1
+:Manual group: User Commands
+
+SYNOPSIS
+========
+
+  nosetests [options] [names]
+
+DESCRIPTION
+===========
+
+.. autohelp ::
diff --git a/doc/manbuilder.py b/doc/manbuilder.py
new file mode 100644 (file)
index 0000000..463d58b
--- /dev/null
@@ -0,0 +1,24 @@
+from manpage import Writer
+from sphinx.builders.text import TextBuilder
+
+
+class ManBuilder(TextBuilder):
+    name = 'manpage'
+    format = 'man'
+    out_suffix = '.man'
+
+    def prepare_writing(self, docnames):
+        self.writer = ManpageWriter(self)
+
+
+class ManpageWriter(Writer):
+    def __init__(self, builder):
+        self.builder = builder
+        Writer.__init__(self)
+
+
+def setup(app):
+    app.add_builder(ManBuilder)
+
+
+
diff --git a/doc/manbuilder.pyc b/doc/manbuilder.pyc
new file mode 100644 (file)
index 0000000..355955a
Binary files /dev/null and b/doc/manbuilder.pyc differ
diff --git a/doc/manpage.py b/doc/manpage.py
new file mode 100644 (file)
index 0000000..7935510
--- /dev/null
@@ -0,0 +1,1119 @@
+# $Id: manpage.py 5901 2009-04-07 13:26:48Z grubert $
+# Author: Engelbert Gruber <grubert@users.sourceforge.net>
+# Copyright: This module is put into the public domain.
+
+"""
+Simple man page writer for reStructuredText.
+
+Man pages (short for "manual pages") contain system documentation on unix-like
+systems. The pages are grouped in numbered sections: 
+
+ 1 executable programs and shell commands
+ 2 system calls
+ 3 library functions
+ 4 special files
+ 5 file formats
+ 6 games
+ 7 miscellaneous
+ 8 system administration
+
+Man pages are written *troff*, a text file formatting system.
+
+See http://www.tldp.org/HOWTO/Man-Page for a start.
+
+Man pages have no subsection only parts.
+Standard parts
+
+  NAME ,
+  SYNOPSIS ,
+  DESCRIPTION ,
+  OPTIONS ,
+  FILES ,
+  SEE ALSO ,
+  BUGS ,
+
+and
+
+  AUTHOR .
+
+A unix-like system keeps an index of the DESCRIPTIONs, which is accesable
+by the command whatis or apropos.
+
+"""
+
+# NOTE: the macros only work when at line start, so try the rule
+#       start new lines in visit_ functions.
+
+__docformat__ = 'reStructuredText'
+
+import sys
+import os
+import time
+import re
+from types import ListType
+
+import docutils
+from docutils import nodes, utils, writers, languages
+
+FIELD_LIST_INDENT = 7
+DEFINITION_LIST_INDENT = 7
+OPTION_LIST_INDENT = 7
+BLOCKQOUTE_INDENT = 3.5
+
+# Define two macros so man/roff can calculate the
+# indent/unindent margins by itself
+MACRO_DEF = (r"""
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level magin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+""")
+
+class Writer(writers.Writer):
+
+    supported = ('manpage')
+    """Formats this writer supports."""
+
+    output = None
+    """Final translated form of `document`."""
+
+    def __init__(self):
+        writers.Writer.__init__(self)
+        self.translator_class = Translator
+
+    def translate(self):
+        visitor = self.translator_class(self.document)
+        self.document.walkabout(visitor)
+        self.output = visitor.astext()
+
+
+class Table:
+    def __init__(self):
+        self._rows = []
+        self._options = ['center', ]
+        self._tab_char = '\t'
+        self._coldefs = []
+    def new_row(self):
+        self._rows.append([])
+    def append_cell(self, cell_lines):
+        """cell_lines is an array of lines"""
+        self._rows[-1].append(cell_lines)
+        if len(self._coldefs) < len(self._rows[-1]):
+            self._coldefs.append('l')
+    def astext(self):
+        text = '.TS\n'
+        text += ' '.join(self._options) + ';\n'
+        text += '|%s|.\n' % ('|'.join(self._coldefs))
+        for row in self._rows:
+            # row = array of cells. cell = array of lines.
+            # line above 
+            text += '_\n'
+            max_lns_in_cell = 0
+            for cell in row:
+                max_lns_in_cell = max(len(cell), max_lns_in_cell)
+            for ln_cnt in range(max_lns_in_cell):
+                line = []
+                for cell in row:
+                    if len(cell) > ln_cnt:
+                        line.append(cell[ln_cnt])
+                    else:
+                        line.append(" ")
+                text += self._tab_char.join(line) + '\n'
+        text += '_\n'
+        text += '.TE\n'
+        return text
+
+class Translator(nodes.NodeVisitor):
+    """"""
+
+    words_and_spaces = re.compile(r'\S+| +|\n')
+    document_start = """Man page generated from reStructeredText."""
+
+    def __init__(self, document):
+        nodes.NodeVisitor.__init__(self, document)
+        self.settings = settings = document.settings
+        lcode = settings.language_code
+        self.language = languages.get_language(lcode, document.reporter)
+        self.head = []
+        self.body = []
+        self.foot = []
+        self.section_level = -1
+        self.context = []
+        self.topic_class = ''
+        self.colspecs = []
+        self.compact_p = 1
+        self.compact_simple = None
+        # the list style "*" bullet or "#" numbered
+        self._list_char = []
+        # writing the header .TH and .SH NAME is postboned after
+        # docinfo.
+        self._docinfo = {
+                "title" : "", "subtitle" : "",
+                "manual_section" : "", "manual_group" : "",
+                "author" : "", 
+                "date" : "", 
+                "copyright" : "",
+                "version" : "",
+                    }
+        self._in_docinfo = 1 # FIXME docinfo not being found?
+        self._active_table = None
+        self._in_entry = None
+        self.header_written = 0
+        self.authors = []
+        self.section_level = -1
+        self._indent = [0]
+        # central definition of simple processing rules
+        # what to output on : visit, depart
+        self.defs = {
+                'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
+                'definition' : ('', ''),
+                'definition_list' : ('', '.TP 0\n'),
+                'definition_list_item' : ('\n.TP', ''),
+                #field_list
+                #field
+                'field_name' : ('\n.TP\n.B ', '\n'),
+                'field_body' : ('', '.RE\n', ),
+                'literal' : ('\\fB', '\\fP'),
+                'literal_block' : ('\n.nf\n', '\n.fi\n'),
+
+                #option_list
+                'option_list_item' : ('\n.TP', ''),
+                #option_group, option
+                'description' : ('\n', ''),
+                
+                'reference' : (r'\fI\%', r'\fP'),
+                #'target'   : (r'\fI\%', r'\fP'),
+                'emphasis': ('\\fI', '\\fP'),
+                'strong' : ('\\fB', '\\fP'),
+                'term' : ('\n.B ', '\n'),
+                'title_reference' : ('\\fI', '\\fP'),
+
+                'problematic' : ('\n.nf\n', '\n.fi\n'),
+                # docinfo fields.
+                'address' : ('\n.nf\n', '\n.fi\n'),
+                'organization' : ('\n.nf\n', '\n.fi\n'),
+                    }
+        # TODO dont specify the newline before a dot-command, but ensure
+        # check it is there.
+
+    def comment_begin(self, text):
+        """Return commented version of the passed text WITHOUT end of line/comment."""
+        prefix = '\n.\\" '
+        return prefix+prefix.join(text.split('\n'))
+
+    def comment(self, text):
+        """Return commented version of the passed text."""
+        return self.comment_begin(text)+'\n'
+
+    def astext(self):
+        """Return the final formatted document as a string."""
+        if not self.header_written:
+            # ensure we get a ".TH" as viewers require it.
+            self.head.append(self.header())
+        return ''.join(self.head + self.body + self.foot)
+
+    def visit_Text(self, node):
+        text = node.astext().replace('-','\-')
+        text = text.replace("'","\\'")
+        self.body.append(text)
+
+    def depart_Text(self, node):
+        pass
+
+    def list_start(self, node):
+        class enum_char:
+            enum_style = {
+                    'arabic'     : (3,1),
+                    'loweralpha' : (3,'a'),
+                    'upperalpha' : (3,'A'),
+                    'lowerroman' : (5,'i'),
+                    'upperroman' : (5,'I'),
+                    'bullet'     : (2,'\\(bu'),
+                    'emdash'     : (2,'\\(em'),
+                     }
+            def __init__(self, style):
+                if style == 'arabic':
+                    if node.has_key('start'):
+                        start = node['start']
+                    else:
+                        start = 1
+                    self._style = (
+                            len(str(len(node.children)))+2,
+                            start )
+                # BUG: fix start for alpha
+                else:
+                    self._style = self.enum_style[style]
+                self._cnt = -1
+            def next(self):
+                self._cnt += 1
+                # BUG add prefix postfix
+                try:
+                    return "%d." % (self._style[1] + self._cnt)
+                except:
+                    if self._style[1][0] == '\\':
+                        return self._style[1]
+                    # BUG romans dont work
+                    # BUG alpha only a...z
+                    return "%c." % (ord(self._style[1])+self._cnt)
+            def get_width(self):
+                return self._style[0]
+            def __repr__(self):
+                return 'enum_style%r' % list(self._style)
+
+        if node.has_key('enumtype'):
+            self._list_char.append(enum_char(node['enumtype']))
+        else:
+            self._list_char.append(enum_char('bullet'))
+        if len(self._list_char) > 1:
+            # indent nested lists
+            # BUG indentation depends on indentation of parent list.
+            self.indent(self._list_char[-2].get_width())
+        else:
+            self.indent(self._list_char[-1].get_width())
+
+    def list_end(self):
+        self.dedent()
+        self._list_char.pop()
+
+    def header(self):
+        tmpl = (".TH %(title)s %(manual_section)s"
+                " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
+                ".SH NAME\n"
+                "%(title)s \- %(subtitle)s\n")
+        return tmpl % self._docinfo
+
+    def append_header(self):
+        """append header with .TH and .SH NAME"""
+        # TODO before everything
+        # .TH title section date source manual
+        if self.header_written:
+            return
+        self.body.append(self.header())
+        self.body.append(MACRO_DEF)
+        self.header_written = 1
+
+    def visit_address(self, node):
+        self._docinfo['address'] = node.astext()
+        raise nodes.SkipNode
+
+    def depart_address(self, node):
+        pass
+
+    def visit_admonition(self, node, name):
+        self.visit_block_quote(node)
+
+    def depart_admonition(self):
+        self.depart_block_quote(None)
+
+    def visit_attention(self, node):
+        self.visit_admonition(node, 'attention')
+
+    def depart_attention(self, node):
+        self.depart_admonition()
+
+    def visit_author(self, node):
+        self._docinfo['author'] = node.astext()
+        raise nodes.SkipNode
+
+    def depart_author(self, node):
+        pass
+
+    def visit_authors(self, node):
+        self.body.append(self.comment('visit_authors'))
+
+    def depart_authors(self, node):
+        self.body.append(self.comment('depart_authors'))
+
+    def visit_block_quote(self, node):
+        #self.body.append(self.comment('visit_block_quote'))
+        # BUG/HACK: indent alway uses the _last_ indention,
+        # thus we need two of them.
+        self.indent(BLOCKQOUTE_INDENT)
+        self.indent(0)
+
+    def depart_block_quote(self, node):
+        #self.body.append(self.comment('depart_block_quote'))
+        self.dedent()
+        self.dedent()
+
+    def visit_bullet_list(self, node):
+        self.list_start(node)
+
+    def depart_bullet_list(self, node):
+        self.list_end()
+
+    def visit_caption(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
+
+    def depart_caption(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('</p>\n')
+
+    def visit_caution(self, node):
+        self.visit_admonition(node, 'caution')
+
+    def depart_caution(self, node):
+        self.depart_admonition()
+
+    def visit_citation(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.starttag(node, 'table', CLASS='citation',
+                                       frame="void", rules="none"))
+        self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
+                         '<col />\n'
+                         '<tbody valign="top">\n'
+                         '<tr>')
+        self.footnote_backrefs(node)
+
+    def depart_citation(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('</td></tr>\n'
+                         '</tbody>\n</table>\n')
+
+    def visit_citation_reference(self, node):
+        raise NotImplementedError, node.astext()
+        href = ''
+        if node.has_key('refid'):
+            href = '#' + node['refid']
+        elif node.has_key('refname'):
+            href = '#' + self.document.nameids[node['refname']]
+        self.body.append(self.starttag(node, 'a', '[', href=href,
+                                       CLASS='citation-reference'))
+
+    def depart_citation_reference(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(']</a>')
+
+    def visit_classifier(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(' <span class="classifier-delimiter">:</span> ')
+        self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
+
+    def depart_classifier(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('</span>')
+
+    def visit_colspec(self, node):
+        self.colspecs.append(node)
+
+    def depart_colspec(self, node):
+        pass
+
+    def write_colspecs(self):
+        self.body.append("%s.\n" % ('L '*len(self.colspecs)))
+
+    def visit_comment(self, node,
+                      sub=re.compile('-(?=-)').sub):
+        self.body.append(self.comment(node.astext()))
+        raise nodes.SkipNode
+
+    def visit_contact(self, node):
+        self.visit_docinfo_item(node, 'contact')
+
+    def depart_contact(self, node):
+        self.depart_docinfo_item()
+
+    def visit_copyright(self, node):
+        self._docinfo['copyright'] = node.astext()
+        raise nodes.SkipNode
+
+    def visit_danger(self, node):
+        self.visit_admonition(node, 'danger')
+
+    def depart_danger(self, node):
+        self.depart_admonition()
+
+    def visit_date(self, node):
+        self._docinfo['date'] = node.astext()
+        raise nodes.SkipNode
+
+    def visit_decoration(self, node):
+        pass
+
+    def depart_decoration(self, node):
+        pass
+
+    def visit_definition(self, node):
+        self.body.append(self.defs['definition'][0])
+
+    def depart_definition(self, node):
+        self.body.append(self.defs['definition'][1])
+
+    def visit_definition_list(self, node):
+        self.indent(DEFINITION_LIST_INDENT)
+
+    def depart_definition_list(self, node):
+        self.dedent()
+
+    def visit_definition_list_item(self, node):
+        self.body.append(self.defs['definition_list_item'][0])
+
+    def depart_definition_list_item(self, node):
+        self.body.append(self.defs['definition_list_item'][1])
+
+    def visit_description(self, node):
+        self.body.append(self.defs['description'][0])
+
+    def depart_description(self, node):
+        self.body.append(self.defs['description'][1])
+
+    def visit_docinfo(self, node):
+        self._in_docinfo = 1
+
+    def depart_docinfo(self, node):
+        self._in_docinfo = None
+        # TODO nothing should be written before this
+        self.append_header()
+
+    def visit_docinfo_item(self, node, name):
+        self.body.append(self.comment('%s: ' % self.language.labels[name]))
+        if len(node):
+            return
+            if isinstance(node[0], nodes.Element):
+                node[0].set_class('first')
+            if isinstance(node[0], nodes.Element):
+                node[-1].set_class('last')
+
+    def depart_docinfo_item(self):
+        pass
+
+    def visit_doctest_block(self, node):
+        # Doctest blocks are not supported in man-page output; the code
+        # after the raise is unreachable HTML-writer leftover.
+        raise NotImplementedError, node.astext()
+        self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
+
+    def depart_doctest_block(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('\n</pre>\n')
+
+    def visit_document(self, node):
+        # Begin output with the document-start comment.
+        self.body.append(self.comment(self.document_start).lstrip())
+        # writing the header is postponed until docinfo has been collected
+        self.header_written = 0
+
+    def depart_document(self, node):
+        # Emit trailing standard man-page sections (AUTHOR, COPYRIGHT)
+        # from the collected docinfo, then a generation-timestamp comment.
+        if self._docinfo['author']:
+            self.body.append('\n.SH AUTHOR\n%s\n' 
+                    % self._docinfo['author'])
+        if 'organization' in self._docinfo:
+            self.body.append(self.defs['organization'][0])
+            self.body.append(self._docinfo['organization'])
+            self.body.append(self.defs['organization'][1])
+        if 'address' in self._docinfo:
+            self.body.append(self.defs['address'][0])
+            self.body.append(self._docinfo['address'])
+            self.body.append(self.defs['address'][1])
+        if self._docinfo['copyright']:
+            self.body.append('\n.SH COPYRIGHT\n%s\n' 
+                    % self._docinfo['copyright'])
+        self.body.append(
+                self.comment(
+                        'Generated by docutils manpage writer on %s.\n' 
+                        % (time.strftime('%Y-%m-%d %H:%M')) ) )
+
+    def visit_emphasis(self, node):
+        # Open the italic font escape for emphasized text.
+        self.body.append(self.defs['emphasis'][0])
+
+    def depart_emphasis(self, node):
+        self.body.append(self.defs['emphasis'][1])
+
+    def visit_entry(self, node):
+        # BUG entries have to be on one line separated by tab force it.
+        # Remember where this table cell's text starts so depart_entry
+        # can cut it out of self.body and hand it to the active table.
+        self.context.append(len(self.body))
+        self._in_entry = 1
+
+    def depart_entry(self, node):
+        start = self.context.pop()
+        self._active_table.append_cell(self.body[start:])
+        del self.body[start:]
+        self._in_entry = 0
+
+    def visit_enumerated_list(self, node):
+        self.list_start(node)
+
+    def depart_enumerated_list(self, node):
+        self.list_end()
+
+    def visit_error(self, node):
+        self.visit_admonition(node, 'error')
+
+    def depart_error(self, node):
+        self.depart_admonition()
+
+    def visit_field(self, node):
+        #self.body.append(self.comment('visit_field'))
+        pass
+
+    def depart_field(self, node):
+        #self.body.append(self.comment('depart_field'))
+        pass
+
+    def visit_field_body(self, node):
+        #self.body.append(self.comment('visit_field_body'))
+        # Inside docinfo, store the field value under a normalized key
+        # ("Some Name" -> "some_name") instead of writing it out.
+        if self._in_docinfo:
+            self._docinfo[
+                    self._field_name.lower().replace(" ","_")] = node.astext()
+            raise nodes.SkipNode
+
+    def depart_field_body(self, node):
+        pass
+
+    def visit_field_list(self, node):
+        self.indent(FIELD_LIST_INDENT)
+
+    def depart_field_list(self, node):
+        self.dedent('depart_field_list')
+
+    def visit_field_name(self, node):
+        if self._in_docinfo:
+            # NOTE(review): this reassignment is redundant -- _in_docinfo
+            # is already truthy inside this branch.
+            self._in_docinfo = 1
+            self._field_name = node.astext()
+            raise nodes.SkipNode
+        else:
+            self.body.append(self.defs['field_name'][0])
+
+    def depart_field_name(self, node):
+        self.body.append(self.defs['field_name'][1])
+
+    # The methods below all raise NotImplementedError immediately; the
+    # statements after each raise are unreachable leftovers from the HTML
+    # writer this module was derived from and are kept only as reference.
+    def visit_figure(self, node):
+        raise NotImplementedError, node.astext()
+
+    def depart_figure(self, node):
+        raise NotImplementedError, node.astext()
+
+    def visit_footer(self, node):
+        raise NotImplementedError, node.astext()
+
+    def depart_footer(self, node):
+        raise NotImplementedError, node.astext()
+        start = self.context.pop()
+        footer = (['<hr class="footer"/>\n',
+                   self.starttag(node, 'div', CLASS='footer')]
+                  + self.body[start:] + ['</div>\n'])
+        self.body_suffix[:0] = footer
+        del self.body[start:]
+
+    def visit_footnote(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.starttag(node, 'table', CLASS='footnote',
+                                       frame="void", rules="none"))
+        self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
+                         '<tbody valign="top">\n'
+                         '<tr>')
+        self.footnote_backrefs(node)
+
+    def footnote_backrefs(self, node):
+        raise NotImplementedError, node.astext()
+        if self.settings.footnote_backlinks and node.hasattr('backrefs'):
+            backrefs = node['backrefs']
+            if len(backrefs) == 1:
+                self.context.append('')
+                self.context.append('<a class="fn-backref" href="#%s" '
+                                    'name="%s">' % (backrefs[0], node['id']))
+            else:
+                i = 1
+                backlinks = []
+                for backref in backrefs:
+                    backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
+                                     % (backref, i))
+                    i += 1
+                self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
+                self.context.append('<a name="%s">' % node['id'])
+        else:
+            self.context.append('')
+            self.context.append('<a name="%s">' % node['id'])
+
+    def depart_footnote(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('</td></tr>\n'
+                         '</tbody>\n</table>\n')
+
+    def visit_footnote_reference(self, node):
+        raise NotImplementedError, node.astext()
+        href = ''
+        if node.has_key('refid'):
+            href = '#' + node['refid']
+        elif node.has_key('refname'):
+            href = '#' + self.document.nameids[node['refname']]
+        format = self.settings.footnote_references
+        if format == 'brackets':
+            suffix = '['
+            self.context.append(']')
+        elif format == 'superscript':
+            suffix = '<sup>'
+            self.context.append('</sup>')
+        else:                           # shouldn't happen
+            suffix = '???'
+            # NOTE(review): 'content' here looks like a typo for 'context'
+            # (unreachable, so harmless in practice).
+            self.content.append('???')
+        self.body.append(self.starttag(node, 'a', suffix, href=href,
+                                       CLASS='footnote-reference'))
+
+    def depart_footnote_reference(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.context.pop() + '</a>')
+
+    def visit_generated(self, node):
+        pass
+
+    def depart_generated(self, node):
+        pass
+
+    def visit_header(self, node):
+        # Not supported for man pages; code after the raise is unreachable
+        # HTML-writer leftover.
+        raise NotImplementedError, node.astext()
+        self.context.append(len(self.body))
+
+    def depart_header(self, node):
+        raise NotImplementedError, node.astext()
+        start = self.context.pop()
+        self.body_prefix.append(self.starttag(node, 'div', CLASS='header'))
+        self.body_prefix.extend(self.body[start:])
+        self.body_prefix.append('<hr />\n</div>\n')
+        del self.body[start:]
+
+    def visit_hint(self, node):
+        self.visit_admonition(node, 'hint')
+
+    def depart_hint(self, node):
+        self.depart_admonition()
+
+    def visit_image(self, node):
+        # Images cannot be rendered in a man page; unreachable HTML code
+        # follows the raise.
+        raise NotImplementedError, node.astext()
+        atts = node.attributes.copy()
+        atts['src'] = atts['uri']
+        del atts['uri']
+        if not atts.has_key('alt'):
+            atts['alt'] = atts['src']
+        if isinstance(node.parent, nodes.TextElement):
+            self.context.append('')
+        else:
+            self.body.append('<p>')
+            self.context.append('</p>\n')
+        self.body.append(self.emptytag(node, 'img', '', **atts))
+
+    def depart_image(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.context.pop())
+
+    def visit_important(self, node):
+        self.visit_admonition(node, 'important')
+
+    def depart_important(self, node):
+        self.depart_admonition()
+
+    def visit_label(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
+                                       CLASS='label'))
+
+    def depart_label(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(']</a></td><td>%s' % self.context.pop())
+
+    def visit_legend(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append(self.starttag(node, 'div', CLASS='legend'))
+
+    def depart_legend(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('</div>\n')
+
+    def visit_line_block(self, node):
+        self.body.append('\n')
+
+    def depart_line_block(self, node):
+        self.body.append('\n')
+
+    def visit_line(self, node):
+        pass
+
+    def depart_line(self, node):
+        # .br forces a line break after each line of a line block.
+        self.body.append('\n.br\n')
+
+    def visit_list_item(self, node):
+        # man 7 man argues to use ".IP" instead of ".TP"
+        # Emit the current list marker (bullet or enumerator) and the
+        # indent width tracked by the innermost list-character object.
+        self.body.append('\n.IP %s %d\n' % (
+                self._list_char[-1].next(),
+                self._list_char[-1].get_width(),) )
+
+    def depart_list_item(self, node):
+        pass
+
+    def visit_literal(self, node):
+        self.body.append(self.defs['literal'][0])
+
+    def depart_literal(self, node):
+        self.body.append(self.defs['literal'][1])
+
+    def visit_literal_block(self, node):
+        self.body.append(self.defs['literal_block'][0])
+
+    def depart_literal_block(self, node):
+        self.body.append(self.defs['literal_block'][1])
+
+    def visit_meta(self, node):
+        raise NotImplementedError, node.astext()
+        self.head.append(self.emptytag(node, 'meta', **node.attributes))
+
+    def depart_meta(self, node):
+        pass
+
+    def visit_note(self, node):
+        self.visit_admonition(node, 'note')
+
+    def depart_note(self, node):
+        self.depart_admonition()
+
+    def indent(self, by=0.5):
+        # Push a new indent level and emit the roff indent-start macro,
+        # parameterized by the *previous* level's step.
+        # if we are in a section ".SH" there already is a .RS
+        #self.body.append('\n[[debug: listchar: %r]]\n' % map(repr, self._list_char))
+        #self.body.append('\n[[debug: indent %r]]\n' % self._indent)
+        step = self._indent[-1]
+        self._indent.append(by)
+        self.body.append(self.defs['indent'][0] % step)
+
+    def dedent(self, name=''):
+        # Pop one indent level; `name` is only used by the debug trace.
+        #self.body.append('\n[[debug: dedent %s %r]]\n' % (name, self._indent))
+        self._indent.pop()
+        self.body.append(self.defs['indent'][1])
+
+    def visit_option_list(self, node):
+        self.indent(OPTION_LIST_INDENT)
+
+    def depart_option_list(self, node):
+        self.dedent()
+
+    def visit_option_list_item(self, node):
+        # one item of the list
+        self.body.append(self.defs['option_list_item'][0])
+
+    def depart_option_list_item(self, node):
+        self.body.append(self.defs['option_list_item'][1])
+
+    def visit_option_group(self, node):
+        # as one option could have several forms it is a group
+        # options without parameter bold only, .B, -v
+        # options with parameter bold italic, .BI, -f file
+        
+        # we do not know if .B or .BI
+        # Stack protocol: [-3] macro name (fixed up later by
+        # visit_option_argument), [-2] body start index, [-1] option count.
+        self.context.append('.B')           # blind guess
+        self.context.append(len(self.body)) # to be able to insert later
+        self.context.append(0)              # option counter
+
+    def depart_option_group(self, node):
+        # Splice the chosen macro (.B or .BI) in front of the collected
+        # option text.
+        self.context.pop()  # the counter
+        start_position = self.context.pop()
+        text = self.body[start_position:]
+        del self.body[start_position:]
+        self.body.append('\n%s%s' % (self.context.pop(), ''.join(text)))
+
+    def visit_option(self, node):
+        # each form of the option will be presented separately
+        if self.context[-1]>0:
+            self.body.append(' ,')
+        if self.context[-3] == '.BI':
+            self.body.append('\\')
+        self.body.append(' ')
+
+    def depart_option(self, node):
+        self.context[-1] += 1
+
+    def visit_option_string(self, node):
+        # do not know if .B or .BI
+        pass
+
+    def depart_option_string(self, node):
+        pass
+
+    def visit_option_argument(self, node):
+        self.context[-3] = '.BI' # bold/italic alternate
+        if node['delimiter'] != ' ':
+            # NOTE(review): '\\fn' is not a standard roff font escape
+            # (\fB/\fI/\fR are) -- looks like a typo; confirm intended
+            # output before changing.
+            self.body.append('\\fn%s ' % node['delimiter'] )
+        elif self.body[len(self.body)-1].endswith('='):
+            # a blank only means no blank in output, just changing font
+            self.body.append(' ')
+        else:
+            # backslash blank blank
+            self.body.append('\\  ')
+
+    def depart_option_argument(self, node):
+        pass
+
+    def visit_organization(self, node):
+        # Collected into docinfo; nothing written here.
+        self._docinfo['organization'] = node.astext()
+        raise nodes.SkipNode
+
+    def depart_organization(self, node):
+        pass
+
+    def visit_paragraph(self, node):
+        # BUG every but the first paragraph in a list must be indented
+        # TODO .PP or new line
+        return
+
+    def depart_paragraph(self, node):
+        # TODO .PP or an empty line
+        # Inside a table entry, paragraphs must stay on one line, so no
+        # blank line is emitted.
+        if not self._in_entry:
+            self.body.append('\n\n')
+
+    def visit_problematic(self, node):
+        self.body.append(self.defs['problematic'][0])
+
+    def depart_problematic(self, node):
+        self.body.append(self.defs['problematic'][1])
+
+    def visit_raw(self, node):
+        # Only raw text explicitly targeted at the manpage format passes
+        # through verbatim.
+        if node.get('format') == 'manpage':
+            self.body.append(node.astext())
+        # Keep non-manpage raw text out of output:
+        raise nodes.SkipNode
+
+    def visit_reference(self, node):
+        """E.g. link or email address."""
+        self.body.append(self.defs['reference'][0])
+
+    def depart_reference(self, node):
+        self.body.append(self.defs['reference'][1])
+
+    def visit_revision(self, node):
+        self.visit_docinfo_item(node, 'revision')
+
+    def depart_revision(self, node):
+        self.depart_docinfo_item()
+
+    def visit_row(self, node):
+        self._active_table.new_row()
+
+    def depart_row(self, node):
+        pass
+
+    def visit_section(self, node):
+        # Track nesting depth; visit_title maps depth to .TH/.SH/.SS.
+        self.section_level += 1
+
+    def depart_section(self, node):
+        self.section_level -= 1    
+
+    def visit_status(self, node):
+        raise NotImplementedError, node.astext()
+        self.visit_docinfo_item(node, 'status', meta=None)
+
+    def depart_status(self, node):
+        self.depart_docinfo_item()
+
+    def visit_strong(self, node):
+        self.body.append(self.defs['strong'][1])
+
+    def depart_strong(self, node):
+        self.body.append(self.defs['strong'][1])
+
+    def visit_substitution_definition(self, node):
+        """Internal only."""
+        raise nodes.SkipNode
+
+    def visit_substitution_reference(self, node):
+        self.unimplemented_visit(node)
+
+    def visit_subtitle(self, node):
+        # Collected into docinfo for the .TH header; nothing written here.
+        self._docinfo["subtitle"] = node.astext()
+        raise nodes.SkipNode
+
+    def visit_system_message(self, node):
+        # TODO add report_level
+        #if node['level'] < self.document.reporter['writer'].report_level:
+            # Level is too low to display:
+        #    raise nodes.SkipNode
+        self.body.append('\.SH system-message\n')
+        # NOTE(review): attr and backref_text are assigned but never used
+        # below -- leftover from the HTML writer.
+        attr = {}
+        backref_text = ''
+        if node.hasattr('id'):
+            attr['name'] = node['id']
+        if node.hasattr('line'):
+            line = ', line %s' % node['line']
+        else:
+            line = ''
+        self.body.append('System Message: %s/%s (%s:%s)\n'
+                         % (node['type'], node['level'], node['source'], line))
+
+    def depart_system_message(self, node):
+        self.body.append('\n')
+
+    def visit_table(self, node):
+        # Cells are accumulated into a Table helper; depart_table renders
+        # the whole table at once.
+        self._active_table = Table()
+
+    def depart_table(self, node):
+        self.body.append(self._active_table.astext())
+        self._active_table = None
+
+    def visit_target(self, node):
+        self.body.append(self.comment('visit_target'))
+        #self.body.append(self.defs['target'][0])
+        #self.body.append(node['refuri'])
+
+    def depart_target(self, node):
+        self.body.append(self.comment('depart_target'))
+        #self.body.append(self.defs['target'][1])
+
+    def visit_tbody(self, node):
+        pass
+
+    def depart_tbody(self, node):
+        pass
+
+    def visit_term(self, node):
+        self.body.append(self.defs['term'][0])
+
+    def depart_term(self, node):
+        self.body.append(self.defs['term'][1])
+
+    def visit_tgroup(self, node):
+        pass
+
+    def depart_tgroup(self, node):
+        pass
+
+    def visit_compound(self, node):
+        pass
+
+    def depart_compound(self, node):
+        pass
+
+    def visit_thead(self, node):
+        # Not supported; unreachable HTML-writer leftover after the raise.
+        raise NotImplementedError, node.astext()
+        self.write_colspecs()
+        self.body.append(self.context.pop()) # '</colgroup>\n'
+        # There may or may not be a <thead>; this is for <tbody> to use:
+        self.context.append('')
+        self.body.append(self.starttag(node, 'thead', valign='bottom'))
+
+    def depart_thead(self, node):
+        raise NotImplementedError, node.astext()
+        self.body.append('</thead>\n')
+
+    def visit_tip(self, node):
+        self.visit_admonition(node, 'tip')
+
+    def depart_tip(self, node):
+        self.depart_admonition()
+
+    def visit_title(self, node):
+        # Map a title to man-page structure depending on where it occurs:
+        # topic/sidebar/admonition titles become comments; the document
+        # title and first-level title feed the .TH header; deeper titles
+        # become .SH / .SS sections.
+        if isinstance(node.parent, nodes.topic):
+            self.body.append(self.comment('topic-title'))
+        elif isinstance(node.parent, nodes.sidebar):
+            self.body.append(self.comment('sidebar-title'))
+        elif isinstance(node.parent, nodes.admonition):
+            self.body.append(self.comment('admonition-title'))
+        elif self.section_level == 0:
+            # document title for .TH
+            self._docinfo['title'] = node.astext()
+            raise nodes.SkipNode
+        elif self.section_level == 1:
+            self._docinfo['subtitle'] = node.astext()
+            raise nodes.SkipNode
+        elif self.section_level == 2:
+            self.body.append('\n.SH ')
+        else:
+            self.body.append('\n.SS ')
+
+    def depart_title(self, node):
+        self.body.append('\n')
+
+    def visit_title_reference(self, node):
+        """inline citation reference"""
+        self.body.append(self.defs['title_reference'][0])
+
+    def depart_title_reference(self, node):
+        self.body.append(self.defs['title_reference'][1])
+
+    def visit_topic(self, node):
+        # Topics are dropped from man output; only a comment remains.
+        self.body.append(self.comment('topic: '+node.astext()))
+        raise nodes.SkipNode
+        ##self.topic_class = node.get('class')
+
+    def depart_topic(self, node):
+        ##self.topic_class = ''
+        pass
+
+    def visit_transition(self, node):
+        # .PP      Begin a new paragraph and reset prevailing indent.
+        # .sp N    leaves N lines of blank space.
+        # .ce      centers the next line
+        self.body.append('\n.sp\n.ce\n----\n')
+
+    def depart_transition(self, node):
+        self.body.append('\n.ce 0\n.sp\n')
+
+    def visit_version(self, node):
+        # Collected into docinfo for the .TH header.
+        self._docinfo["version"] = node.astext()
+        raise nodes.SkipNode
+
+    def visit_warning(self, node):
+        self.visit_admonition(node, 'warning')
+
+    def depart_warning(self, node):
+        self.depart_admonition()
+
+    def visit_index(self, node):
+        pass
+
+    def depart_index(self, node):
+        pass
+
+    # Sphinx "desc" nodes (e.g. .. cmdoption::) are rendered as tagged
+    # paragraphs: .TP, bold option name, then the indented help text.
+    def visit_desc(self, node):
+        pass
+
+    def depart_desc(self, node):
+        pass
+
+    def visit_desc_signature(self, node):
+        # .. cmdoption makes options look like this
+        self.body.append('\n')
+        self.body.append('.TP')
+        self.body.append('\n')
+
+    def depart_desc_signature(self, node):
+        pass
+
+    def visit_desc_name(self, node):
+        self.body.append(r'\fB') # option name
+
+    def depart_desc_name(self, node):
+        self.body.append(r'\fR')
+
+    def visit_desc_addname(self, node):
+        self.body.append(r'\fR')
+
+    def depart_desc_addname(self, node):
+        # self.body.append(r'\fR')
+        pass
+
+    def visit_desc_content(self, node):
+        self.body.append('\n') # option help
+
+    def depart_desc_content(self, node):
+        pass
+
+    def unimplemented_visit(self, node):
+        # Deliberate no-op fallback for node types with no man rendering.
+        pass
+
+# vim: set et ts=4 ai :
diff --git a/doc/manpage.pyc b/doc/manpage.pyc
new file mode 100644 (file)
index 0000000..d3feb34
Binary files /dev/null and b/doc/manpage.pyc differ
diff --git a/doc/more_info.rst b/doc/more_info.rst
new file mode 100644 (file)
index 0000000..a37aeb2
--- /dev/null
@@ -0,0 +1,48 @@
+About the name
+==============
+
+* nose is the least silly short synonym for discover in the dictionary.com
+  thesaurus that does not contain the word 'spy.'
+* Pythons have noses
+* The nose knows where to find your tests
+* Nose Obviates Suite Employment
+
+Contact the author
+==================
+
+You can email me at jpellerin+nose at gmail dot com.
+
+To report bugs, ask questions, or request features, please use the *issues*
+tab at the Google code site: http://code.google.com/p/python-nose/issues/list.
+Patches are welcome!
+
+Similar test runners
+====================
+
+nose was inspired mainly by py.test_, which is a great test runner, but
+formerly was not all that easy to install, and is not based on unittest.
+
+Test suites written for use with nose should work equally well with py.test,
+and vice versa, except for the differences in output capture and command line
+arguments for the respective tools.
+
+.. _py.test: http://codespeak.net/py/current/doc/test.html
+
+License and copyright
+=====================
+
+nose is copyright Jason Pellerin 2005-2009
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
\ No newline at end of file
diff --git a/doc/news.rst b/doc/news.rst
new file mode 100644 (file)
index 0000000..089b935
--- /dev/null
@@ -0,0 +1,4 @@
+What's new
+==========
+
+.. include :: ../CHANGELOG
diff --git a/doc/plugins.rst b/doc/plugins.rst
new file mode 100644 (file)
index 0000000..4053b8c
--- /dev/null
@@ -0,0 +1,70 @@
+Extending and customizing nose with plugins
+===========================================
+
+nose has plugin hooks for loading, running, watching and reporting on tests and
+test runs. If you don't like the default collection scheme, or it doesn't suit
+the layout of your project, or you need reports in a format different from the
+unittest standard, or you need to collect some additional information about
+tests (like code coverage or profiling data), you can write a plugin to do so.
+See the section on `writing plugins`_ for more. 
+
+nose also comes with a number of built-in plugins, such as:
+
+* Output capture
+  
+  Unless called with the ``-s`` (``--nocapture``) switch, nose will capture
+  stdout during each test run, and print the captured output only for tests
+  that fail or have errors. The captured output is printed immediately
+  following the error or failure output for the test. (Note that output in
+  teardown methods is captured, but can't be output with failing tests, because
+  teardown has not yet run at the time of the failure.)
+
+* Assert introspection
+
+  When run with the ``-d`` (``--detailed-errors``) switch, nose will try to
+  output additional information about the assert expression that failed with
+  each failing test. Currently, this means that names in the assert expression
+  will be expanded into any values found for them in the locals or globals in
+  the frame in which the expression executed.
+  
+  In other words, if you have a test like::
+  
+    def test_integers():
+        a = 2
+        assert a == 4, "assert 2 is 4"
+    
+  You will get output like::
+    
+      File "/path/to/file.py", line XX, in test_integers:
+           assert a == 4, "assert 2 is 4"
+      AssertionError: assert 2 is 4
+        >>  assert 2 == 4, "assert 2 is 4"
+    
+  Please note that dotted names are not expanded, and callables are not called
+  in the expansion.
+
+See below for the rest of the built-in plugins.
+
+Using Builtin plugins
+---------------------
+
+See :doc:`plugins/builtin`
+
+Writing plugins
+---------------
+
+.. toctree ::
+   :maxdepth: 2
+   
+   plugins/writing
+   plugins/interface
+   plugins/errorclasses
+   plugins/documenting
+   
+Testing plugins
+---------------
+
+.. toctree ::
+   :maxdepth: 2
+   
+   plugins/testing
\ No newline at end of file
diff --git a/doc/plugins/allmodules.rst b/doc/plugins/allmodules.rst
new file mode 100644 (file)
index 0000000..ad6d034
--- /dev/null
@@ -0,0 +1,4 @@
+AllModules: collect tests in all modules
+========================================
+
+.. autoplugin :: nose.plugins.allmodules
\ No newline at end of file
diff --git a/doc/plugins/attrib.rst b/doc/plugins/attrib.rst
new file mode 100644 (file)
index 0000000..beaa834
--- /dev/null
@@ -0,0 +1,4 @@
+Attrib: tag and select tests with attributes
+============================================
+
+.. autoplugin :: nose.plugins.attrib
diff --git a/doc/plugins/builtin.rst b/doc/plugins/builtin.rst
new file mode 100644 (file)
index 0000000..8d2147f
--- /dev/null
@@ -0,0 +1,30 @@
+Batteries included: builtin nose plugins
+========================================
+
+nose includes a number of builtin plugins that can make testing faster and easier.
+
+.. note ::
+
+   nose 0.11.2 includes a change to default plugin loading. Now, a 3rd party
+   plugin with *the same name* as a builtin *will be loaded instead*
+   of the builtin plugin.
+
+.. toctree ::
+   :maxdepth: 2
+
+   allmodules
+   attrib
+   capture
+   collect
+   cover
+   debug
+   deprecated
+   doctests
+   failuredetail
+   isolate
+   logcapture
+   multiprocess
+   prof
+   skip
+   testid
+   xunit
diff --git a/doc/plugins/capture.rst b/doc/plugins/capture.rst
new file mode 100644 (file)
index 0000000..27a0c2e
--- /dev/null
@@ -0,0 +1,5 @@
+Capture: capture stdout during tests
+====================================
+
+.. autoplugin :: nose.plugins.capture
+
diff --git a/doc/plugins/collect.rst b/doc/plugins/collect.rst
new file mode 100644 (file)
index 0000000..011a96d
--- /dev/null
@@ -0,0 +1,4 @@
+Collect: Collect tests quickly
+==============================
+
+.. autoplugin :: nose.plugins.collect
\ No newline at end of file
diff --git a/doc/plugins/cover.rst b/doc/plugins/cover.rst
new file mode 100644 (file)
index 0000000..e970b2c
--- /dev/null
@@ -0,0 +1,14 @@
+Cover: code coverage
+====================
+
+.. note ::
+
+   Newer versions of coverage contain their own nose plugin which is
+   superior to the builtin plugin. It exposes more of coverage's
+   options and uses coverage's native html output. Depending on the
+   version of coverage installed, the included plugin may override the
+   nose builtin plugin, or be available under a different name. Check
+   ``nosetests --help`` or ``nosetests --plugins`` to find out which
+   coverage plugin is available on your system.
+
+.. autoplugin :: nose.plugins.cover
diff --git a/doc/plugins/debug.rst b/doc/plugins/debug.rst
new file mode 100644 (file)
index 0000000..cac67f3
--- /dev/null
@@ -0,0 +1,4 @@
+Debug: drop into pdb on errors or failures
+==========================================
+
+.. autoplugin :: nose.plugins.debug
\ No newline at end of file
diff --git a/doc/plugins/deprecated.rst b/doc/plugins/deprecated.rst
new file mode 100644 (file)
index 0000000..ebb8140
--- /dev/null
@@ -0,0 +1,4 @@
+Deprecated: mark tests as deprecated
+====================================
+
+.. autoplugin :: nose.plugins.deprecated
diff --git a/doc/plugins/doctests.rst b/doc/plugins/doctests.rst
new file mode 100644 (file)
index 0000000..9763765
--- /dev/null
@@ -0,0 +1,4 @@
+Doctests: run doctests with nose
+================================
+
+.. autoplugin :: nose.plugins.doctests
diff --git a/doc/plugins/documenting.rst b/doc/plugins/documenting.rst
new file mode 100644 (file)
index 0000000..c841f76
--- /dev/null
@@ -0,0 +1,62 @@
+Documenting plugins
+===================
+
+A parable. If a plugin is released on pypi without any documentation, does
+anyone care?
+
+To make it easy to document your plugins, nose includes a `Sphinx`_ extension
+that will automatically generate plugin docs like those for nose's builtin
+plugins. Simply add 'nose.sphinx.pluginopts' to the list of extensions in your
+conf.py::
+
+  extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
+                'nose.sphinx.pluginopts']
+
+Then in your plugin documents, include a title and the ``.. autoplugin``
+directive::
+
+  My Cool Plugin
+  ==============
+
+  .. autoplugin :: package.module.with.plugin
+     :plugin: PluginClass
+
+The ``:plugin:`` option is optional. In most cases, the directive will
+automatically detect which class in the named module is the plugin to be
+documented.
+
+The output of the directive includes the docstring of the plugin module, the
+options defined by the plugin, `autodoc`_ generated for the plugin class, and
+the plugin module source. This is roughly equivalent to::
+
+  My Cool Plugin
+  ==============
+
+  .. automodule :: package.module.with.plugin
+
+  Options
+  -------
+  
+  .. cmdoption :: --with-coolness
+
+     Help text of the coolness option.
+
+  .. cmdoption :: 
+
+  Plugin
+  -------
+  .. autoclass :: package.module.with.plugin.PluginClass
+     :members:
+
+  Source
+  ------
+
+  .. include :: path/to/package/module/with/plugin.py
+     :literal:
+
+Document your plugins! Your users might not thank you -- but at least you'll
+*have* some users.
+
+.. _`Sphinx` : http://sphinx.pocoo.org/
+.. _`autodoc`: http://sphinx.pocoo.org/ext/autodoc.html
\ No newline at end of file
diff --git a/doc/plugins/errorclasses.rst b/doc/plugins/errorclasses.rst
new file mode 100644 (file)
index 0000000..1e758f4
--- /dev/null
@@ -0,0 +1,7 @@
+.. automodule :: nose.plugins.errorclass
+
+Error class methods
+-------------------
+
+.. autoclass :: nose.plugins.errorclass.ErrorClassPlugin
+   :members:
diff --git a/doc/plugins/failuredetail.rst b/doc/plugins/failuredetail.rst
new file mode 100644 (file)
index 0000000..5a3d0df
--- /dev/null
@@ -0,0 +1,4 @@
+Failure Detail: introspect asserts
+==================================
+
+.. autoplugin :: nose.plugins.failuredetail
diff --git a/doc/plugins/interface.rst b/doc/plugins/interface.rst
new file mode 100644 (file)
index 0000000..cab8b9d
--- /dev/null
@@ -0,0 +1,122 @@
+
+.. _plugin-interface:
+
+Plugin Interface
+================
+
+Plugin base class
+-----------------
+
+.. autoclass :: nose.plugins.base.Plugin
+   :members:
+
+Nose plugin API
+---------------
+
+Plugins may implement any or all of the methods documented below. Please note
+that they *must not* subclass `IPluginInterface`; `IPluginInterface` is only a
+description of the plugin API.
+
+When plugins are called, the first plugin that implements a method and returns
+a non-None value wins, and plugin processing ends. The exceptions to this are
+methods marked as `generative` or `chainable`.  `generative` methods combine
+the output of all plugins that respond with an iterable into a single
+flattened iterable response (a generator, really). `chainable` methods pass
+the results of calling plugin A as the input to plugin B, where the positions
+in the chain are determined by the plugin sort order, which is in order by
+`score` descending.
+
+In general, plugin methods correspond directly to methods of
+`nose.selector.Selector`, `nose.loader.TestLoader` and
+`nose.result.TextTestResult`, and are called by those methods when they
+are called. In some cases, the plugin hook doesn't neatly match the method in
+which it is called; for those, the documentation for the hook will tell you
+where in the test process it is called.
+
+Plugin hooks fall into four broad categories: selecting and loading tests,
+handling errors raised by tests, preparing objects used in the testing
+process, and watching and reporting on test results.
+
+Selecting and loading tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To alter test selection behavior, implement any necessary `want*` methods as
+outlined below. Keep in mind, though, that when your plugin returns True from
+a `want*` method, you will send the requested object through the normal test
+collection process. If the object represents something from which normal tests
+can't be collected, you must also implement a loader method to load the tests.
+
+Examples:
+
+* The builtin :doc:`doctests plugin <doctests>` implements `wantFile` to
+  enable loading of doctests from files that are not python modules. It
+  also implements `loadTestsFromModule` to load doctests from
+  python modules, and `loadTestsFromFile` to load tests from the
+  non-module files selected by `wantFile`.
+   
+* The builtin :doc:`attrib plugin <attrib>` implements `wantFunction` and
+  `wantMethod` so that it can reject tests that don't match the
+  specified attributes.
+
+Handling errors
+^^^^^^^^^^^^^^^
+
+To alter error handling behavior -- for instance to catch a certain class of 
+exception and handle it differently from the normal error or failure handling
+-- you should subclass :class:`nose.plugins.errorclass.ErrorClassPlugin`. See
+:doc:`the section on ErrorClass plugins <errorclasses>` for more details.
+
+Examples:
+
+* The builtin :doc:`skip <skip>` and :doc:`deprecated <deprecated>` plugins are
+  ErrorClass plugins.
+
+
+Preparing test objects
+^^^^^^^^^^^^^^^^^^^^^^
+
+To alter, get a handle on, or replace test framework objects such as the
+loader, result, runner, and test cases, use the appropriate prepare methods.
+The simplest reason to use prepare is in the case that you need to use an
+object yourself. For example, the isolate plugin implements `prepareTestLoader`
+so that it can use the loader later on to load tests. If you return a value
+from a prepare method, that value will be used in place of the loader, result,
+runner or test case, depending on which prepare method you use. Be aware that
+when replacing test cases, you are replacing the *entire* test case -- including
+the whole `run(result)` method of the `unittest.TestCase` -- so if you want
+normal unittest test result reporting, you must implement the same calls to
+result as `unittest.TestCase.run`.
+
+Examples:
+
+* The builtin :doc:`isolate plugin <isolate>` implements `prepareTestLoader`
+  but does not replace the test loader.
+
+* The builtin :doc:`profile plugin <prof>` implements `prepareTest` and does
+  replace the top-level test case by returning the case wrapped in
+  the profiler function.
+
+Watching or reporting on tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To record information about tests or other modules imported during
+the testing process, output additional reports, or entirely change
+test report output, implement any of the methods outlined below that
+correspond to TextTestResult methods.
+
+Examples:
+
+* The builtin :doc:`cover plugin <cover>` implements `begin` and `report` to
+  capture and report code coverage metrics for all or selected modules
+  loaded during testing.
+   
+* The builtin :doc:`profile plugin <prof>` implements `begin`, `prepareTest`
+  and `report` to record and output profiling information. In this
+  case, the plugin's `prepareTest` method constructs a function that
+  runs the test through the hotshot profiler's runcall() method.
+
+Plugin interface methods
+------------------------
+
+.. autoclass :: nose.plugins.base.IPluginInterface
+   :members:
\ No newline at end of file
diff --git a/doc/plugins/isolate.rst b/doc/plugins/isolate.rst
new file mode 100644 (file)
index 0000000..94b5641
--- /dev/null
@@ -0,0 +1,4 @@
+Isolate: protect tests from (some) side-effects
+-----------------------------------------------
+
+.. autoplugin :: nose.plugins.isolate
\ No newline at end of file
diff --git a/doc/plugins/logcapture.rst b/doc/plugins/logcapture.rst
new file mode 100644 (file)
index 0000000..4bf09c3
--- /dev/null
@@ -0,0 +1,4 @@
+Logcapture: capture logging during tests
+========================================
+
+.. autoplugin :: nose.plugins.logcapture
\ No newline at end of file
diff --git a/doc/plugins/multiprocess.rst b/doc/plugins/multiprocess.rst
new file mode 100644 (file)
index 0000000..c9f4aa7
--- /dev/null
@@ -0,0 +1,5 @@
+------------------------------
+Multiprocess: parallel testing
+------------------------------
+
+.. autoplugin :: nose.plugins.multiprocess
diff --git a/doc/plugins/other.rst b/doc/plugins/other.rst
new file mode 100644 (file)
index 0000000..47490c3
--- /dev/null
@@ -0,0 +1,6 @@
+Third-party nose plugins
+------------------------
+
+Visit http://nose-plugins.jottit.com/ for a list of third-party nose plugins
+compatible with nose 0.9 through 0.11. If you have released a plugin that you
+don't see in the list, please add it!
diff --git a/doc/plugins/prof.rst b/doc/plugins/prof.rst
new file mode 100644 (file)
index 0000000..f778942
--- /dev/null
@@ -0,0 +1,4 @@
+Prof: enable profiling using the hotshot profiler
+=================================================
+
+.. autoplugin :: nose.plugins.prof
diff --git a/doc/plugins/skip.rst b/doc/plugins/skip.rst
new file mode 100644 (file)
index 0000000..07f9207
--- /dev/null
@@ -0,0 +1,5 @@
+Skip: mark tests as skipped
+===========================
+
+.. autoplugin :: nose.plugins.skip
+   :plugin: Skip
\ No newline at end of file
diff --git a/doc/plugins/testid.rst b/doc/plugins/testid.rst
new file mode 100644 (file)
index 0000000..377e2e7
--- /dev/null
@@ -0,0 +1,4 @@
+Testid: add a test id to each test name output
+==============================================
+
+.. autoplugin :: nose.plugins.testid
diff --git a/doc/plugins/testing.rst b/doc/plugins/testing.rst
new file mode 100644 (file)
index 0000000..1e17fb2
--- /dev/null
@@ -0,0 +1,7 @@
+.. automodule :: nose.plugins.plugintest
+
+PluginTester methods
+--------------------
+
+.. autoclass :: nose.plugins.plugintest.PluginTester
+   :members:
\ No newline at end of file
diff --git a/doc/plugins/writing.rst b/doc/plugins/writing.rst
new file mode 100644 (file)
index 0000000..ed73b9f
--- /dev/null
@@ -0,0 +1 @@
+.. automodule :: nose.plugins
\ No newline at end of file
diff --git a/doc/plugins/xunit.rst b/doc/plugins/xunit.rst
new file mode 100644 (file)
index 0000000..5602e5d
--- /dev/null
@@ -0,0 +1,4 @@
+Xunit: output test results in xunit format
+==========================================
+
+.. autoplugin :: nose.plugins.xunit
\ No newline at end of file
diff --git a/doc/rtd-requirements.txt b/doc/rtd-requirements.txt
new file mode 100644 (file)
index 0000000..2872958
--- /dev/null
@@ -0,0 +1,3 @@
+# requirements file for Read The Docs
+# http://readthedocs.org/docs/nose/
+sphinx>=1.0
diff --git a/doc/setuptools_integration.rst b/doc/setuptools_integration.rst
new file mode 100644 (file)
index 0000000..b886e76
--- /dev/null
@@ -0,0 +1,38 @@
+Setuptools integration
+======================
+
+.. warning :: Please note that when run under the setuptools test command,
+              many plugins will not be available, including the builtin
+              coverage and profiler plugins. If you want to access to all
+              available plugins, use the :doc:`nosetests <api/commands>`
+              command instead.
+
+nose may be used with the setuptools_ test command. Simply specify
+nose.collector as the test suite in your setup file::
+
+  setup (
+      # ...
+      test_suite = 'nose.collector'
+  )
+
+Then to find and run tests, you can run::
+
+  python setup.py test
+
+When running under setuptools, you can configure nose settings via the
+environment variables detailed in the nosetests script usage message,
+or the setup.cfg, ~/.noserc or ~/.nose.cfg config files.
+
+`nosetests` command
+-------------------
+
+nose also includes its own setuptools command, ``nosetests``, that provides
+support for all plugins and command line options. It works just like the
+``test`` command::
+
+  python setup.py nosetests
+
+See :doc:`api/commands` for more information about the ``nosetests`` command.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
diff --git a/doc/testing.rst b/doc/testing.rst
new file mode 100644 (file)
index 0000000..42bf6fe
--- /dev/null
@@ -0,0 +1,55 @@
+Testing with nose
+=================
+
+Writing tests is easier
+-----------------------
+
+nose collects tests from :class:`unittest.TestCase` subclasses, of course. But
+you can also write simple test functions, as well as test classes that are
+*not* subclasses of :class:`unittest.TestCase`. nose also supplies a number of
+helpful functions for writing timed tests, testing for exceptions, and other
+common use cases. See :doc:`writing_tests` and :doc:`testing_tools` for more.
+
+Running tests is easier
+-----------------------
+
+nose collects tests automatically, as long as you follow some simple
+guidelines for organizing your library and test code. There's no need
+to manually collect test cases into test suites. Running tests is
+responsive, since nose begins running tests as soon as the first test
+module is loaded. See :doc:`finding_tests` for more.
+
+Setting up your test environment is easier
+------------------------------------------
+
+nose supports fixtures at the package, module, class, and test case
+level, so expensive initialization can be done as infrequently as
+possible. See :ref:`fixtures` for more.
+
+Doing what you want to do is easier
+-----------------------------------
+
+nose comes with a number of :doc:`builtin plugins <plugins/builtin>` to help
+you with output capture, error introspection, code coverage, doctests, and
+more. It also comes with plugin hooks for loading, running, watching and
+reporting on tests and test runs. If you don't like the default collection
+scheme, or it doesn't suit the layout of your project, or you need reports in
+a format different from the unittest standard, or you need to collect some
+additional information about tests (like code coverage or profiling data), you
+can write a plugin to make nose do what you want. See the section on
+:doc:`plugins/writing` for more.  There are also many 
+`third-party nose plugins <http://nose-plugins.jottit.com/>`_ available.
+
+Details
+-------
+
+.. toctree ::
+   :maxdepth: 2
+
+   usage
+   writing_tests
+   finding_tests
+   testing_tools
+   plugins/builtin
+   plugins/other
+   setuptools_integration
diff --git a/doc/testing_tools.rst b/doc/testing_tools.rst
new file mode 100644 (file)
index 0000000..45e2958
--- /dev/null
@@ -0,0 +1,11 @@
+Testing tools
+-------------
+
+The nose.tools module provides a number of testing aids that you may
+find useful, including decorators for restricting test execution time
+and testing for exceptions, and all of the same assertX methods found
+in `unittest.TestCase` (only spelled in PEP 8 fashion, so `assert_equal`
+rather than `assertEqual`).
+
+.. automodule :: nose.tools
+   :members:
\ No newline at end of file
diff --git a/doc/usage.rst b/doc/usage.rst
new file mode 100644 (file)
index 0000000..11e682b
--- /dev/null
@@ -0,0 +1,42 @@
+Basic usage
+-----------
+
+Use the nosetests script (after installation by setuptools)::
+
+  nosetests [options] [(optional) test files or directories]
+
+In addition to passing command-line options, you may also put configuration
+options in a .noserc or nose.cfg file in your home directory. These are
+standard .ini-style config files. Put your nosetests configuration in a
+[nosetests] section, with the -- prefix removed::
+
+   [nosetests]
+   verbosity=3
+   with-doctest=1
+  
+There are several other ways to use the nose test runner besides the
+`nosetests` script. You may use nose in a test script::
+
+  import nose
+  nose.main()
+
+If you don't want the test script to exit with 0 on success and 1 on failure
+(like unittest.main), use nose.run() instead::
+
+  import nose
+  result = nose.run()
+  
+`result` will be true if the test run succeeded, or false if any test failed
+or raised an uncaught exception. Lastly, you can run nose.core directly, which
+will run nose.main()::
+
+  python /path/to/nose/core.py
+  
+Please see the usage message for the nosetests script for information
+about how to control which tests nose runs, which plugins are loaded,
+and the test output.
+
+Extended usage
+^^^^^^^^^^^^^^
+
+.. autohelp ::
diff --git a/doc/writing_tests.rst b/doc/writing_tests.rst
new file mode 100644 (file)
index 0000000..d2418bd
--- /dev/null
@@ -0,0 +1,172 @@
+Writing tests
+-------------
+
+As with py.test_, nose tests need not be subclasses of
+:class:`unittest.TestCase`. Any function or class that matches the configured
+testMatch regular expression (``(?:^|[\\b_\\.-])[Tt]est``) by default -- that
+is, has test or Test at a word boundary or following a - or _) and lives in a
+module that also matches that expression will be run as a test. For the sake
+of compatibility with legacy unittest test cases, nose will also load tests
+from :class:`unittest.TestCase` subclasses just like unittest does. Like
+py.test, nose runs functional tests in the order in which they appear in the
+module file. TestCase-derived tests and other test classes are run in
+alphabetical order.
+
+.. _py.test: http://codespeak.net/py/current/doc/test.html
+
+.. _fixtures:
+
+Fixtures
+========
+
+nose supports fixtures (setup and teardown methods) at the package,
+module, class, and test level. As with py.test or unittest fixtures,
+setup always runs before any test (or collection of tests for test
+packages and modules); teardown runs if setup has completed
+successfully, regardless of the status of the test run. For more detail
+on fixtures at each level, see below.
+
+Test packages
+=============
+
+nose allows tests to be grouped into test packages. This allows
+package-level setup; for instance, if you need to create a test database
+or other data fixture for your tests, you may create it in package setup
+and remove it in package teardown once per test run, rather than having to
+create and tear it down once per test module or test case.
+
+To create package-level setup and teardown methods, define setup and/or
+teardown functions in the ``__init__.py`` of a test package. Setup methods may
+be named `setup`, `setup_package`, `setUp`, or `setUpPackage`; teardown may
+be named `teardown`, `teardown_package`, `tearDown` or `tearDownPackage`.
+Execution of tests in a test package begins as soon as the first test
+module is loaded from the test package.
+
+Test modules
+============
+
+A test module is a python module that matches the testMatch regular
+expression. Test modules offer module-level setup and teardown; define the
+method `setup`, `setup_module`, `setUp` or `setUpModule` for setup,
+`teardown`, `teardown_module`, or `tearDownModule` for teardown. Execution
+of tests in a test module begins after all tests are collected.
+
+Test classes
+============
+
+A test class is a class defined in a test module that matches testMatch or is
+a subclass of :class:`unittest.TestCase`. All test classes are run the same
+way: Methods in the class that match testMatch are discovered, and a test
+case is constructed to run each method with a fresh instance of the test
+class. Like :class:`unittest.TestCase` subclasses, other test classes can
+define setUp and tearDown methods that will be run before and after each test
+method. Test classes that do not descend from `unittest.TestCase` may also
+include generator methods and class-level fixtures. Class-level setup fixtures
+may be named `setup_class`, `setupClass`, `setUpClass`, `setupAll` or 
+`setUpAll`; teardown fixtures may be named `teardown_class`, `teardownClass`, 
+`tearDownClass`, `teardownAll` or `tearDownAll`. Class-level setup and teardown
+fixtures must be class methods.
+
+Test functions
+==============
+
+Any function in a test module that matches testMatch will be wrapped in a
+`FunctionTestCase` and run as a test. The simplest possible failing test is
+therefore::
+
+  def test():
+      assert False
+
+And the simplest passing test::
+
+  def test():
+      pass
+
+Test functions may define setup and/or teardown attributes, which will be
+run before and after the test function, respectively. A convenient way to
+do this, especially when several test functions in the same module need
+the same setup, is to use the provided `with_setup` decorator::
+
+  def setup_func():
+      "set up test fixtures"
+
+  def teardown_func():
+      "tear down test fixtures"
+
+  @with_setup(setup_func, teardown_func)
+  def test():
+      "test ..."
+
+For python 2.3 or earlier, add the attributes by calling the decorator
+function like so::
+
+  def test():
+      "test ... "
+  test = with_setup(setup_func, teardown_func)(test)
+
+or by direct assignment::
+
+  test.setup = setup_func
+  test.teardown = teardown_func
+  
+Please note that `with_setup` is useful *only* for test functions, not
+for test methods in `unittest.TestCase` subclasses or other test
+classes. For those cases, define `setUp` and `tearDown` methods in the
+class.
+  
+Test generators
+===============
+
+nose supports test functions and methods that are generators. A simple
+example from nose's selftest suite is probably the best explanation::
+
+  def test_evens():
+      for i in range(0, 5):
+          yield check_even, i, i*3
+
+  def check_even(n, nn):
+      assert n % 2 == 0 or nn % 2 == 0
+
+This will result in five tests. nose will iterate the generator, creating a
+function test case wrapper for each tuple it yields. As in the example, test
+generators must yield tuples, the first element of which must be a callable
+and the remaining elements the arguments to be passed to the callable.
+
+By default, the test name output for a generated test in verbose mode
+will be the name of the generator function or method, followed by the
+args passed to the yielded callable. If you want to show a different test
+name, set the ``description`` attribute of the yielded callable.
+
+Setup and teardown functions may be used with test generators. However, please
+note that setup and teardown attributes attached to the *generator function*
+will execute only once. To *execute fixtures for each yielded test*, attach
+the setup and teardown attributes to the function that is yielded, or yield a
+callable object instance with setup and teardown attributes.
+
+For example::
+
+  @with_setup(setup_func, teardown_func)
+  def test_generator():
+      # ...
+      yield func, arg, arg # ...
+
+Here, the setup and teardown functions will be executed *once*. Compare to::
+
+  def test_generator():
+      # ...
+      yield func, arg, arg # ...
+
+  @with_setup(setup_func, teardown_func)
+  def func(arg):
+      assert something_about(arg)
+
+In the latter case the setup and teardown functions will execute once for each
+yielded test.
+
+For generator methods, the setUp and tearDown methods of the class (if any)
+will be run before and after each generated test case. The setUp and tearDown
+methods *do not* run before the generator method itself, as this would cause
+setUp to run twice before the first test without an intervening tearDown.
+
+Please note that method generators *are not* supported in `unittest.TestCase`
+subclasses.
\ No newline at end of file
diff --git a/examples/attrib_plugin.py b/examples/attrib_plugin.py
new file mode 100644 (file)
index 0000000..c1f8458
--- /dev/null
@@ -0,0 +1,82 @@
+"""\r
+Examples of test function/method attribute usage with patched nose\r
+\r
+Simple syntax (-a, --attr) examples:\r
+  * nosetests -a status=stable\r
+    => only test cases with attribute "status" having value "stable"\r
+\r
+  * nosetests -a priority=2,status=stable\r
+    => both attributes must match\r
+\r
+  * nosetests -a tags=http\r
+    => attribute list "tags" must contain value "http" (see test_foobar()\r
+       below for definition)\r
+\r
+  * nosetests -a slow\r
+    => attribute "slow" must be defined and its value cannot equal to False\r
+       (False, [], "", etc...)\r
+\r
+  * nosetests -a !slow\r
+    => attribute "slow" must NOT be defined or its value must be equal to False\r
+\r
+Eval expression syntax (-A, --eval-attr) examples:\r
+  * nosetests -A "not slow"\r
+  * nosetests -A "(priority > 5) and not slow"\r
+  \r
+This example and the accompanied patch is in public domain, free for any use.\r
+\r
+email: mika.eloranta@gmail.com\r
+\r
+"""\r
+\r
+__author__ = 'Mika Eloranta'\r
+\r
+def attr(**kwargs):\r
+    """Add attributes to a test function/method/class"""\r
+    def wrap(func):\r
+        func.__dict__.update(kwargs)\r
+        return func\r
+    return wrap\r
+\r
+# test function with single attribute\r
+@attr(priority = 1)\r
+def test_dummy():\r
+    print "dummy"\r
+    \r
+# test function with multiple attributes\r
+@attr(status = "stable",              # simple string attribute\r
+      slow = True,                    # attributes can be of any type\r
+                                      #   (e.g. bool)\r
+      priority = 1,                   # ...or int\r
+      tags = ["http", "pop", "imap"]) # will be run if any of the list items\r
+                                      #   matches\r
+def test_foobar():\r
+    print "foobar"\r
+\r
+# another way of adding attributes...\r
+def test_fluffy():\r
+    print "fluffy"\r
+test_fluffy.status = "unstable"\r
+test_fluffy.slow = True\r
+test_fluffy.priority = 2\r
+\r
+# works for class methods, too\r
+class TestSomething:\r
+    @attr(status = "stable", priority = 2)\r
+    def test_xyz(self):\r
+        print "xyz"\r
+\r
+# class methods "inherit" attributes from the class but can override them\r
+class TestOverride:\r
+    value = "class"\r
+    # run all methods with "nosetests -a value"\r
+\r
+    @attr(value = "method")\r
+    def test_override(self):\r
+        # run with "nosetests -a value=method"\r
+        print "override"\r
+    \r
+    def test_inherit(self):\r
+        # run with "nosetests -a value=class"\r
+        print "inherit"\r
+    \r
diff --git a/examples/html_plugin/htmlplug.py b/examples/html_plugin/htmlplug.py
new file mode 100644 (file)
index 0000000..aa1bcb6
--- /dev/null
@@ -0,0 +1,92 @@
+"""This is a very basic example of a plugin that controls all test
+output. In this case, it formats the output as ugly unstyled html.
+
+Upgrading this plugin into one that uses a template and css to produce
+nice-looking, easily-modifiable html output is left as an exercise for
+the reader who would like to see his or her name in the nose AUTHORS file.
+"""
+import traceback
+from nose.plugins import Plugin
+
+class HtmlOutput(Plugin):
+    """Output test results as ugly, unstyled html.
+    """
+    
+    # Plugin name: enables the plugin via --with-html-output.
+    name = 'html-output'
+    score = 2 # run late
+    
+    def __init__(self):
+        # Accumulate the report as a list of html fragments; they are
+        # flushed to the captured output stream in finalize().
+        super(HtmlOutput, self).__init__()
+        self.html = [ '<html><head>',
+                      '<title>Test output</title>',
+                      '</head><body>' ]
+    
+    def addSuccess(self, test):
+        # Record one passing test.
+        self.html.append('<span>ok</span>')
+        
+    def addError(self, test, err):
+        # Record a test that raised an uncaught exception; err is an
+        # exc_info() triple.
+        err = self.formatErr(err)
+        self.html.append('<span>ERROR</span>')
+        self.html.append('<pre>%s</pre>' % err)
+            
+    def addFailure(self, test, err):
+        # Record a failed assertion; err is an exc_info() triple.
+        err = self.formatErr(err)
+        self.html.append('<span>FAIL</span>')
+        self.html.append('<pre>%s</pre>' % err)
+
+    def finalize(self, result):
+        # Append the summary footer and write the whole accumulated
+        # document to the stream grabbed in setOutputStream().
+        self.html.append('<div>')
+        self.html.append("Ran %d test%s" %
+                         (result.testsRun, result.testsRun != 1 and "s" or ""))
+        self.html.append('</div>')
+        self.html.append('<div>')
+        if not result.wasSuccessful():
+            self.html.extend(['<span>FAILED ( ',
+                              'failures=%d ' % len(result.failures),
+                              'errors=%d' % len(result.errors),
+                              ')</span>'])                             
+        else:
+            self.html.append('OK')
+        self.html.append('</div></body></html>')
+        # print >> sys.stderr, self.html
+        for l in self.html:
+            self.stream.writeln(l)
+
+    def formatErr(self, err):
+        # Render an exc_info() triple as a plain-text traceback string.
+        exctype, value, tb = err
+        return ''.join(traceback.format_exception(exctype, value, tb))
+    
+    def setOutputStream(self, stream):
+        # grab for own use
+        self.stream = stream        
+        # return dummy stream so the default text output is suppressed
+        class dummy:
+            def write(self, *arg):
+                pass
+            def writeln(self, *arg):
+                pass
+        d = dummy()
+        return d
+
+    def startContext(self, ctx):
+        # Open a <fieldset> for a test context; fall back to a cleaned
+        # str() when the context object has no __name__.
+        try:
+            n = ctx.__name__
+        except AttributeError:
+            n = str(ctx).replace('<', '').replace('>', '')
+        self.html.extend(['<fieldset>', '<legend>', n, '</legend>'])
+        try:
+            path = ctx.__file__.replace('.pyc', '.py')
+            self.html.extend(['<div>', path, '</div>'])
+        except AttributeError:
+            pass
+
+    def stopContext(self, ctx):
+        self.html.append('</fieldset>')
+    
+    def startTest(self, test):
+        # Open a per-test <div> labelled with the test's description.
+        self.html.extend([ '<div><span>',
+                           test.shortDescription() or str(test),
+                           '</span>' ])
+        
+    def stopTest(self, test):
+        self.html.append('</div>')
diff --git a/examples/html_plugin/setup.py b/examples/html_plugin/setup.py
new file mode 100644 (file)
index 0000000..3caa08d
--- /dev/null
@@ -0,0 +1,24 @@
+# Install script for the example html output plugin.
+import sys
+# Bootstrap setuptools via ez_setup when available; otherwise assume
+# setuptools is already installed.
+try:
+    import ez_setup
+    ez_setup.use_setuptools()
+except ImportError:
+    pass
+
+from setuptools import setup
+
+setup(
+    name='Example html output plugin',
+    version='0.1',
+    author='Jason Pellerin',
+    author_email = 'jpellerin+nose@gmail.com',
+    description = 'Example nose html output plugin',
+    license = 'GNU LGPL',
+    py_modules = ['htmlplug'],
+    # Register under nose's 0.10+ entry point group so nose discovers
+    # the plugin automatically once the package is installed.
+    entry_points = {
+        'nose.plugins.0.10': [
+            'htmlout = htmlplug:HtmlOutput'
+            ]
+        }
+
+    )
diff --git a/examples/plugin/plug.py b/examples/plugin/plug.py
new file mode 100644 (file)
index 0000000..444226d
--- /dev/null
@@ -0,0 +1,4 @@
+from nose.plugins import Plugin
+
+class ExamplePlugin(Plugin):
+    # Minimal example plugin: inherits all default behavior from Plugin
+    # and adds nothing of its own.
+    pass
diff --git a/examples/plugin/setup.py b/examples/plugin/setup.py
new file mode 100644 (file)
index 0000000..4dd5dad
--- /dev/null
@@ -0,0 +1,27 @@
+"""
+An example of how to create a simple nose plugin.
+
+"""
+try:
+    import ez_setup
+    ez_setup.use_setuptools()
+except ImportError:
+    pass
+
+from setuptools import setup
+
+setup(
+    name='Example plugin',
+    version='0.1',
+    author='Jason Pellerin',
+    author_email = 'jpellerin+nose@gmail.com',
+    description = 'Example nose plugin',
+    license = 'GNU LGPL',
+    py_modules = ['plug'],
+    entry_points = {
+        'nose.plugins.0.10': [
+            'example = plug:ExamplePlugin'
+            ]
+        }
+
+    )
diff --git a/functional_tests/doc_tests/test_addplugins/support/test$py.class b/functional_tests/doc_tests/test_addplugins/support/test$py.class
new file mode 100644 (file)
index 0000000..f4dea0b
Binary files /dev/null and b/functional_tests/doc_tests/test_addplugins/support/test$py.class differ
diff --git a/functional_tests/doc_tests/test_addplugins/support/test.py b/functional_tests/doc_tests/test_addplugins/support/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/functional_tests/doc_tests/test_addplugins/support/test.pyc b/functional_tests/doc_tests/test_addplugins/support/test.pyc
new file mode 100644 (file)
index 0000000..0e4b6ad
Binary files /dev/null and b/functional_tests/doc_tests/test_addplugins/support/test.pyc differ
diff --git a/functional_tests/doc_tests/test_addplugins/test_addplugins.rst b/functional_tests/doc_tests/test_addplugins/test_addplugins.rst
new file mode 100644 (file)
index 0000000..8e8da91
--- /dev/null
@@ -0,0 +1,80 @@
+Using custom plugins without setuptools
+---------------------------------------
+
+If you have one or more custom plugins that you'd like to use with nose, but
+can't or don't want to register that plugin as a setuptools entrypoint, you
+can use the ``addplugins`` keyword argument to :func:`nose.core.main` or
+:func:`nose.core.run` to make the plugins available.
+
+To do this you would construct a launcher script for nose, something like::
+
+  from nose import main
+  from yourpackage import YourPlugin, YourOtherPlugin
+
+  if __name__ == '__main__':
+      main(addplugins=[YourPlugin(), YourOtherPlugin()])
+
+Here's an example. Say that you don't like the fact that the collect-only
+plugin outputs 'ok' for each test it finds; instead you want it to output
+'maybe.' You could modify the plugin itself, or instead, create a Maybe plugin
+that transforms the output into your desired shape.
+
+Without the plugin, we get 'ok.'
+
+>>> import os
+>>> support = os.path.join(os.path.dirname(__file__), 'support')
+>>> from nose.plugins.plugintest import run_buffered as run
+>>> argv = [__file__, '-v', support] # --collect-only
+>>> run(argv=argv)
+test.test ... ok
+<BLANKLINE>
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
+Without '-v', we get a dot.
+
+>>> run(argv=[__file__, support])
+.
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
+The plugin is simple. It captures and wraps the test result output stream and
+replaces 'ok' with 'maybe' and '.' with '?'.
+
+>>> from nose.plugins.base import Plugin
+>>> class Maybe(Plugin):
+...     def setOutputStream(self, stream):
+...         self.stream = stream
+...         return self
+...     def flush(self):
+...         self.stream.flush()
+...     def writeln(self, out=""):
+...         self.write(out + "\n")
+...     def write(self, out):
+...         if out == "ok\n":
+...             out = "maybe\n"
+...         elif out == ".":
+...             out = "?"
+...         self.stream.write(out)
+
+To activate the plugin, we pass an instance in the addplugins list.
+
+>>> run(argv=argv + ['--with-maybe'], addplugins=[Maybe()])
+test.test ... maybe
+<BLANKLINE>
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
+>>> run(argv=[__file__, support, '--with-maybe'], addplugins=[Maybe()])
+?
+----------------------------------------------------------------------
+Ran 1 test in ...s
+<BLANKLINE>
+OK
+
diff --git a/functional_tests/doc_tests/test_allmodules/support/mod$py.class b/functional_tests/doc_tests/test_allmodules/support/mod$py.class
new file mode 100644 (file)
index 0000000..6322627
Binary files /dev/null and b/functional_tests/doc_tests/test_allmodules/support/mod$py.class differ
diff --git a/functional_tests/doc_tests/test_allmodules/support/mod.py b/functional_tests/doc_tests/test_allmodules/support/mod.py
new file mode 100644 (file)
index 0000000..e136d56
--- /dev/null
@@ -0,0 +1,5 @@
+def test():
+    pass
+
+def test_fails():
+    assert False, "This test fails"
diff --git a/functional_tests/doc_tests/test_allmodules/support/mod.pyc b/functional_tests/doc_tests/test_allmodules/support/mod.pyc
new file mode 100644 (file)
index 0000000..17ebf6e
Binary files /dev/null and b/functional_tests/doc_tests/test_allmodules/support/mod.pyc differ
diff --git a/functional_tests/doc_tests/test_allmodules/support/test$py.class b/functional_tests/doc_tests/test_allmodules/support/test$py.class
new file mode 100644 (file)
index 0000000..2d22e0d
Binary files /dev/null and b/functional_tests/doc_tests/test_allmodules/support/test$py.class differ
diff --git a/functional_tests/doc_tests/test_allmodules/support/test.py b/functional_tests/doc_tests/test_allmodules/support/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/functional_tests/doc_tests/test_allmodules/support/test.pyc b/functional_tests/doc_tests/test_allmodules/support/test.pyc
new file mode 100644 (file)
index 0000000..5648105
Binary files /dev/null and b/functional_tests/doc_tests/test_allmodules/support/test.pyc differ
diff --git a/functional_tests/doc_tests/test_allmodules/test_allmodules.rst b/functional_tests/doc_tests/test_allmodules/test_allmodules.rst
new file mode 100644 (file)
index 0000000..b541987
--- /dev/null
@@ -0,0 +1,67 @@
+Finding tests in all modules
+============================
+
+Normally, nose only looks for tests in modules whose names match testMatch. By
+default that means modules with 'test' or 'Test' at the start of the name
+after an underscore (_) or dash (-) or other non-alphanumeric character.
+
+If you want to collect tests from all modules, use the ``--all-modules``
+command line argument to activate the :doc:`allmodules plugin
+<../../plugins/allmodules>`.
+
+.. Note ::
+
+   The function :func:`nose.plugins.plugintest.run` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> argv = [__file__, '-v', support]
+
+The target directory contains a test module and a normal module.
+
+    >>> support_files = [d for d in os.listdir(support)
+    ...                  if not d.startswith('.')
+    ...                  and d.endswith('.py')]
+    >>> support_files.sort()
+    >>> support_files
+    ['mod.py', 'test.py']
+
+When run without ``--all-modules``, only the test module is examined for tests.
+
+    >>> run(argv=argv)
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+When ``--all-modules`` is active, both modules are examined.
+
+    >>> from nose.plugins.allmodules import AllModules
+    >>> argv = [__file__, '-v', '--all-modules', support]
+    >>> run(argv=argv, plugins=[AllModules()]) # doctest: +REPORT_NDIFF
+    mod.test ... ok
+    mod.test_fails ... FAIL
+    test.test ... ok
+    <BLANKLINE>
+    ======================================================================
+    FAIL: mod.test_fails
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AssertionError: This test fails
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    FAILED (failures=1)
+
+
+
diff --git a/functional_tests/doc_tests/test_coverage_html/coverage_html.rst b/functional_tests/doc_tests/test_coverage_html/coverage_html.rst
new file mode 100644 (file)
index 0000000..95f9e8a
--- /dev/null
@@ -0,0 +1,57 @@
+Generating HTML Coverage with nose
+----------------------------------
+
+.. Note ::
+
+    HTML coverage requires Ned Batchelder's `coverage.py`_ module.
+..
+
+Console coverage output is useful but terse. For a more browseable view of
+code coverage, the coverage plugin supports basic HTML coverage output.
+
+.. hide this from the actual documentation:
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> cover_html_dir = os.path.join(support, 'cover')
+    >>> cover_file = os.path.join(os.getcwd(), '.coverage')
+    >>> if os.path.exists(cover_file):
+    ...     os.unlink(cover_file)
+    ...
+
+
+The console coverage output is printed, as normal.
+
+    >>> from nose.plugins.cover import Coverage
+    >>> cover_html_dir = os.path.join(support, 'cover')
+    >>> run(argv=[__file__, '-v', '--with-coverage', '--cover-package=blah', 
+    ...           '--cover-html', '--cover-html-dir=' + cover_html_dir,
+    ...           support, ], 
+    ...     plugins=[Coverage()]) # doctest: +REPORT_NDIFF
+    test_covered.test_blah ... hi
+    ok
+    <BLANKLINE>
+    Name    Stmts   Miss  Cover   Missing
+    -------------------------------------
+    blah        4      1    75%   6
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+The html coverage reports are saved to disk in the directory specified by the
+``--cover-html-dir`` option. In that directory you'll find ``index.html``
+which links to a detailed coverage report for each module in the report. The
+detail pages show the module source, colorized to indicate which lines are
+covered and which are not. There is an example of this HTML output in the
+`coverage.py`_ docs.
+
+.. hide this from the actual documentation:
+    >>> os.path.exists(cover_file)
+    True
+    >>> os.path.exists(os.path.join(cover_html_dir, 'index.html'))
+    True
+    >>> os.path.exists(os.path.join(cover_html_dir, 'blah.html'))
+    True
+
+.. _`coverage.py`: http://nedbatchelder.com/code/coverage/
diff --git a/functional_tests/doc_tests/test_coverage_html/coverage_html.rst.py3.patch b/functional_tests/doc_tests/test_coverage_html/coverage_html.rst.py3.patch
new file mode 100644 (file)
index 0000000..f325a01
--- /dev/null
@@ -0,0 +1,16 @@
+--- coverage_html.rst.orig     2010-08-31 23:13:33.000000000 -0700
++++ coverage_html.rst  2010-08-31 23:14:25.000000000 -0700
+@@ -78,11 +78,11 @@
+     </div>
+     <div class="coverage">
+     <div class="cov"><span class="num"><pre>1</pre></span><pre>def dostuff():</pre></div>
+-    <div class="cov"><span class="num"><pre>2</pre></span><pre>    print 'hi'</pre></div>
++    <div class="cov"><span class="num"><pre>2</pre></span><pre>    print('hi')</pre></div>
+     <div class="skip"><span class="num"><pre>3</pre></span><pre></pre></div>
+     <div class="skip"><span class="num"><pre>4</pre></span><pre></pre></div>
+     <div class="cov"><span class="num"><pre>5</pre></span><pre>def notcov():</pre></div>
+-    <div class="nocov"><span class="num"><pre>6</pre></span><pre>    print 'not covered'</pre></div>
++    <div class="nocov"><span class="num"><pre>6</pre></span><pre>    print('not covered')</pre></div>
+     <div class="skip"><span class="num"><pre>7</pre></span><pre></pre></div>
+     </div>
+     </body>
diff --git a/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures$py.class b/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures$py.class
new file mode 100644 (file)
index 0000000..79f8015
Binary files /dev/null and b/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures$py.class differ
diff --git a/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.py b/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.py
new file mode 100644 (file)
index 0000000..6829dc2
--- /dev/null
@@ -0,0 +1,26 @@
+import sys
+import os
+import shutil
+from nose.plugins.skip import SkipTest
+from nose.plugins.cover import Coverage
+from nose.plugins.plugintest import munge_nose_output_for_doctest
+
+# This fixture is not reentrant because we have to cleanup the files that
+# coverage produces once all tests have finished running.
+_multiprocess_shared_ = True
+
+def setup_module():
+    try:
+        import coverage
+        if 'active' in Coverage.status:
+            raise SkipTest("Coverage plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("coverage module not available")
+
+def teardown_module():
+    # Clean up the files produced by coverage
+    cover_html_dir = os.path.join(os.path.dirname(__file__), 'support', 'cover')
+    if os.path.exists(cover_html_dir):
+        shutil.rmtree(cover_html_dir)
+
diff --git a/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.pyc b/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.pyc
new file mode 100644 (file)
index 0000000..00eee1b
Binary files /dev/null and b/functional_tests/doc_tests/test_coverage_html/coverage_html_fixtures.pyc differ
diff --git a/functional_tests/doc_tests/test_coverage_html/support/blah.py b/functional_tests/doc_tests/test_coverage_html/support/blah.py
new file mode 100644 (file)
index 0000000..ef6657c
--- /dev/null
@@ -0,0 +1,6 @@
+def dostuff():
+    print 'hi'
+
+
+def notcov():
+    print 'not covered'
diff --git a/functional_tests/doc_tests/test_coverage_html/support/blah.pyc b/functional_tests/doc_tests/test_coverage_html/support/blah.pyc
new file mode 100644 (file)
index 0000000..923a02c
Binary files /dev/null and b/functional_tests/doc_tests/test_coverage_html/support/blah.pyc differ
diff --git a/functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.py b/functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.py
new file mode 100644 (file)
index 0000000..c669c5c
--- /dev/null
@@ -0,0 +1,4 @@
+import blah
+
+def test_blah():
+    blah.dostuff()
diff --git a/functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.pyc b/functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.pyc
new file mode 100644 (file)
index 0000000..b4d933c
Binary files /dev/null and b/functional_tests/doc_tests/test_coverage_html/support/tests/test_covered.pyc differ
diff --git a/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures.rst b/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures.rst
new file mode 100644 (file)
index 0000000..6ff8fed
--- /dev/null
@@ -0,0 +1,122 @@
+Doctest Fixtures
+----------------
+
+Doctest files, like other tests, can be made more efficient or meaningful or
+at least easier to write by judicious use of fixtures. nose supports limited
+fixtures for use with doctest files. 
+
+Module-level fixtures
+=====================
+
+Fixtures for a doctest file may define any or all of the following methods for
+module-level setup:
+
+* setup
+* setup_module
+* setupModule
+* setUpModule
+
+Each module-level setup function may optionally take a single argument, the
+fixtures module itself.
+
+Example::
+
+  def setup_module(module):
+      module.called[:] = []
+
+Similarly, module-level teardown methods are available, which also optionally
+take the fixtures module as an argument:
+      
+* teardown
+* teardown_module
+* teardownModule
+* tearDownModule
+
+Example::
+
+  def teardown_module(module):
+      module.called[:] = []
+      module.done = True
+
+Module-level setup executes **before any tests are loaded** from the doctest
+file. This is the right place to raise :class:`nose.plugins.skip.SkipTest`,
+for example.
+      
+Test-level fixtures
+===================
+
+In addition to module-level fixtures, *test*-level fixtures are
+supported. Keep in mind that in the doctest lexicon, the *test* is the *entire
+doctest file* -- not each individual example within the file. So, like the
+module-level fixtures, test-level fixtures execute *once per file*. The
+differences are that:
+
+- test-level fixtures execute **after** tests have been loaded, but **before**
+  any tests have executed.
+- test-level fixtures receive the doctest :class:`doctest.DocFileCase` loaded
+  from the file as their one *required* argument.
+      
+**setup_test(test)** is called before the test is run.
+
+Example::
+
+  def setup_test(test):
+      called.append(test)
+      test.globs['count'] = len(called)
+  setup_test.__test__ = False
+      
+**teardown_test(test)** is called after the test, unless setup raised an
+uncaught exception. The argument is the :class:`doctest.DocFileCase` object,
+*not* a unittest.TestCase.
+
+Example::
+
+  def teardown_test(test):
+      pass
+  teardown_test.__test__ = False
+  
+Bottom line: setup_test, teardown_test have access to the *doctest test*,
+while setup, setup_module, etc have access to the *fixture*
+module. setup_module runs before tests are loaded, setup_test after.
+
+.. note ::
+
+   As in the examples, it's a good idea to tag your setup_test/teardown_test
+   functions with ``__test__ = False`` to avoid them being collected as tests.
+
+Lastly, the fixtures for a doctest file may supply a **globs(globs)**
+function. The dict returned by this function will be passed to the doctest
+runner as the globals available to the test. You can use this, for example, to
+easily inject a module's globals into a doctest that has been moved from the
+module to a separate file. 
+
+Example
+=======
+
+This doctest has some simple fixtures:
+
+.. include :: doctest_fixtures_fixtures.py
+   :literal:
+
+The ``globs`` defined in the fixtures make the variable ``something``
+available in all examples.
+   
+    >>> something
+    'Something?'
+
+The ``count`` variable is injected by the test-level fixture.
+    
+    >>> count
+    1
+
+.. warning ::
+
+  This whole file is one doctest test. setup_test doesn't do what you think!
+  It exists to give you access to the test case and examples, but it runs
+  *once*, before all of them, not before each.
+
+    >>> count
+    1
+
+  Thus, ``count`` stays 1 throughout the test, no matter how many examples it
+  includes.
\ No newline at end of file
diff --git a/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class b/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class
new file mode 100644 (file)
index 0000000..eeeff8a
Binary files /dev/null and b/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures$py.class differ
diff --git a/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.py b/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.py
new file mode 100644 (file)
index 0000000..72fbfd7
--- /dev/null
@@ -0,0 +1,17 @@
+called = []
+
+def globs(globs):
+    globs['something'] = 'Something?'
+    return globs
+
+def setup_module(module):
+    module.called[:] = []
+
+def setup_test(test):
+    called.append(test)
+    test.globs['count'] = len(called)
+setup_test.__test__ = False
+    
+def teardown_test(test):
+    pass
+teardown_test.__test__ = False
diff --git a/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc b/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc
new file mode 100644 (file)
index 0000000..bf8a880
Binary files /dev/null and b/functional_tests/doc_tests/test_doctest_fixtures/doctest_fixtures_fixtures.pyc differ
diff --git a/functional_tests/doc_tests/test_init_plugin/example.cfg b/functional_tests/doc_tests/test_init_plugin/example.cfg
new file mode 100644 (file)
index 0000000..b02ac0e
--- /dev/null
@@ -0,0 +1,3 @@
+[DEFAULT]
+can_frobnicate = 1
+likes_cheese = 0
diff --git a/functional_tests/doc_tests/test_init_plugin/init_plugin.rst b/functional_tests/doc_tests/test_init_plugin/init_plugin.rst
new file mode 100644 (file)
index 0000000..6c64029
--- /dev/null
@@ -0,0 +1,164 @@
+Running Initialization Code Before the Test Run
+-----------------------------------------------
+
+Many applications, especially those using web frameworks like Pylons_
+or Django_, can't be tested without first being configured or
+otherwise initialized. Plugins can fulfill this requirement by
+implementing :meth:`begin() <nose.plugins.base.IPluginInterface.begin>`.
+
+In this example, we'll use a very simple example: a widget class that
+can't be tested without a configuration.
+
+Here's the widget class. It's configured at the class or instance
+level by setting the ``cfg`` attribute to a dictionary.
+
+    >>> class ConfigurableWidget(object):
+    ...     cfg = None
+    ...     def can_frobnicate(self):
+    ...         return self.cfg.get('can_frobnicate', True)
+    ...     def likes_cheese(self):
+    ...         return self.cfg.get('likes_cheese', True)
+
+The tests verify that the widget's methods can be called without
+raising any exceptions.
+
+    >>> import unittest
+    >>> class TestConfigurableWidget(unittest.TestCase):
+    ...     longMessage = False
+    ...     def setUp(self):
+    ...         self.widget = ConfigurableWidget()
+    ...     def test_can_frobnicate(self):
+    ...         """Widgets can frobnicate (or not)"""
+    ...         self.widget.can_frobnicate()
+    ...     def test_likes_cheese(self):
+    ...         """Widgets might like cheese"""
+    ...         self.widget.likes_cheese()
+    ...     def shortDescription(self): # 2.7 compat
+    ...         try:
+    ...             doc = self._testMethodDoc
+    ...         except AttributeError:
+    ...             # 2.4 compat
+    ...             doc = self._TestCase__testMethodDoc
+    ...         return doc and doc.split("\n")[0].strip() or None
+
+The tests are bundled into a suite that we can pass to the test runner.
+
+    >>> def suite():
+    ...     return unittest.TestSuite([
+    ...         TestConfigurableWidget('test_can_frobnicate'),
+    ...         TestConfigurableWidget('test_likes_cheese')])
+
+When we run tests without first configuring the ConfigurableWidget,
+the tests fail.
+
+.. Note ::
+
+   The function :func:`nose.plugins.plugintest.run` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> argv = [__file__, '-v']
+    >>> run(argv=argv, suite=suite())  # doctest: +REPORT_NDIFF
+    Widgets can frobnicate (or not) ... ERROR
+    Widgets might like cheese ... ERROR
+    <BLANKLINE>
+    ======================================================================
+    ERROR: Widgets can frobnicate (or not)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError: 'NoneType' object has no attribute 'get'
+    <BLANKLINE>
+    ======================================================================
+    ERROR: Widgets might like cheese
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError: 'NoneType' object has no attribute 'get'
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=2)
+
+To configure the widget system before running tests, write a plugin
+that implements :meth:`begin() <nose.plugins.base.IPluginInterface.begin>`
+and initializes the system with a hard-coded configuration. (Later, we'll
+write a better plugin that accepts a command-line argument specifying the
+configuration file.)
+
+    >>> from nose.plugins import Plugin
+    >>> class ConfiguringPlugin(Plugin):
+    ...     enabled = True
+    ...     def configure(self, options, conf):
+    ...         pass # always on
+    ...     def begin(self):
+    ...         ConfigurableWidget.cfg = {}
+
+Now configure and execute a new test run using the plugin, which will
+inject the hard-coded configuration.
+
+    >>> run(argv=argv, suite=suite(),
+    ...     plugins=[ConfiguringPlugin()])  # doctest: +REPORT_NDIFF
+    Widgets can frobnicate (or not) ... ok
+    Widgets might like cheese ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+This time the tests pass, because the widget class is configured.
+
+But the ConfiguringPlugin is pretty lame -- the configuration it
+installs is hard coded. A better plugin would allow the user to
+specify a configuration file on the command line:
+
+    >>> class BetterConfiguringPlugin(Plugin):
+    ...     def options(self, parser, env={}):
+    ...         parser.add_option('--widget-config', action='store',
+    ...                           dest='widget_config', default=None,
+    ...                           help='Specify path to widget config file')
+    ...     def configure(self, options, conf):
+    ...         if options.widget_config:
+    ...             self.load_config(options.widget_config)
+    ...             self.enabled = True
+    ...     def begin(self):
+    ...         ConfigurableWidget.cfg = self.cfg
+    ...     def load_config(self, path):
+    ...         from ConfigParser import ConfigParser
+    ...         p = ConfigParser()
+    ...         p.read([path])
+    ...         self.cfg = dict(p.items('DEFAULT'))
+
+To use the plugin, we need a config file.
+
+    >>> import os
+    >>> cfg_file = os.path.join(os.path.dirname(__file__), 'example.cfg')
+    >>> bytes = open(cfg_file, 'w').write("""\
+    ... [DEFAULT]
+    ... can_frobnicate = 1
+    ... likes_cheese = 0
+    ... """)
+
+Now we can execute a test run using that configuration, after first
+resetting the widget system to an unconfigured state.
+
+    >>> ConfigurableWidget.cfg = None
+    >>> argv = [__file__, '-v', '--widget-config', cfg_file]
+    >>> run(argv=argv, suite=suite(),
+    ...     plugins=[BetterConfiguringPlugin()]) # doctest: +REPORT_NDIFF
+    Widgets can frobnicate (or not) ... ok
+    Widgets might like cheese ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+.. _Pylons: http://pylonshq.com/
+.. _Django: http://www.djangoproject.com/
diff --git a/functional_tests/doc_tests/test_init_plugin/init_plugin.rst.py3.patch b/functional_tests/doc_tests/test_init_plugin/init_plugin.rst.py3.patch
new file mode 100644 (file)
index 0000000..90a0a44
--- /dev/null
@@ -0,0 +1,10 @@
+--- init_plugin.rst.orig       2010-08-31 10:36:54.000000000 -0700
++++ init_plugin.rst    2010-08-31 10:37:30.000000000 -0700
+@@ -143,6 +143,7 @@
+     ... can_frobnicate = 1
+     ... likes_cheese = 0
+     ... """)
++    46
+ Now we can execute a test run using that configuration, after first
+ resetting the widget system to an unconfigured state.
diff --git a/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__$py.class b/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__$py.class
new file mode 100644 (file)
index 0000000..1ccf3e4
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.py b/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.pyc b/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.pyc
new file mode 100644 (file)
index 0000000..d3e47f0
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/unwanted_package/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class b/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class
new file mode 100644 (file)
index 0000000..2998cfd
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam$py.class differ
diff --git a/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.py b/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.py
new file mode 100644 (file)
index 0000000..cfd1cc1
--- /dev/null
@@ -0,0 +1,3 @@
+def test_spam():
+    assert True
+
diff --git a/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc b/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc
new file mode 100644 (file)
index 0000000..5598905
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/unwanted_package/test_spam.pyc differ
diff --git a/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__$py.class b/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__$py.class
new file mode 100644 (file)
index 0000000..bb74c14
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.py b/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.pyc b/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.pyc
new file mode 100644 (file)
index 0000000..5bd67e3
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/wanted_package/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class b/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class
new file mode 100644 (file)
index 0000000..b32d690
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs$py.class differ
diff --git a/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.py b/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.py
new file mode 100644 (file)
index 0000000..bb65550
--- /dev/null
@@ -0,0 +1,3 @@
+def test_eggs():
+    assert True
+
diff --git a/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc b/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc
new file mode 100644 (file)
index 0000000..c12ec5f
Binary files /dev/null and b/functional_tests/doc_tests/test_issue089/support/wanted_package/test_eggs.pyc differ
diff --git a/functional_tests/doc_tests/test_issue089/unwanted_package.rst b/functional_tests/doc_tests/test_issue089/unwanted_package.rst
new file mode 100644 (file)
index 0000000..c7efc27
--- /dev/null
@@ -0,0 +1,70 @@
+Excluding Unwanted Packages
+---------------------------
+
+Normally, nose discovery descends into all packages. Plugins can
+change this behavior by implementing :meth:`IPluginInterface.wantDirectory()`.
+
+In this example, we have a wanted package called ``wanted_package``
+and an unwanted package called ``unwanted_package``. 
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> support_files = [d for d in os.listdir(support)
+    ...                  if not d.startswith('.')]
+    >>> support_files.sort()
+    >>> support_files
+    ['unwanted_package', 'wanted_package']
+
+When we run nose normally, tests are loaded from both packages. 
+
+.. Note ::
+
+   The function :func:`nose.plugins.plugintest.run` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> argv = [__file__, '-v', support]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    unwanted_package.test_spam.test_spam ... ok
+    wanted_package.test_eggs.test_eggs ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+To exclude the tests in the unwanted package, we can write a simple
+plugin that implements :meth:`IPluginInterface.wantDirectory()` and returns ``False`` if
+the basename of the directory is ``"unwanted_package"``. This will
+prevent nose from descending into the unwanted package.
+
+    >>> from nose.plugins import Plugin
+    >>> class UnwantedPackagePlugin(Plugin):
+    ...     # no command line arg needed to activate plugin
+    ...     enabled = True
+    ...     name = "unwanted-package"
+    ...     
+    ...     def configure(self, options, conf):
+    ...         pass # always on
+    ...     
+    ...     def wantDirectory(self, dirname):
+    ...         want = None
+    ...         if os.path.basename(dirname) == "unwanted_package":
+    ...             want = False
+    ...         return want
+
+In the next test run we use the plugin, and the unwanted package is
+not discovered.
+
+    >>> run(argv=argv,
+    ...     plugins=[UnwantedPackagePlugin()]) # doctest: +REPORT_NDIFF    
+    wanted_package.test_eggs.test_eggs ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
\ No newline at end of file
diff --git a/functional_tests/doc_tests/test_issue097/plugintest_environment.rst b/functional_tests/doc_tests/test_issue097/plugintest_environment.rst
new file mode 100644 (file)
index 0000000..99b37cf
--- /dev/null
@@ -0,0 +1,160 @@
+nose.plugins.plugintest, os.environ and sys.argv
+------------------------------------------------
+
+:class:`nose.plugins.plugintest.PluginTester` and
+:func:`nose.plugins.plugintest.run` are utilities for testing nose
+plugins.  When testing plugins, it should be possible to control the
+environment seen by plugins under test, and that environment should never
+be affected by ``os.environ`` or ``sys.argv``.
+
+    >>> import os
+    >>> import sys
+    >>> import unittest
+    >>> import nose.config
+    >>> from nose.plugins import Plugin
+    >>> from nose.plugins.builtin import FailureDetail, Capture
+    >>> from nose.plugins.plugintest import PluginTester
+
+Our test plugin takes no command-line arguments and simply prints the
+environment it's given by nose.
+
+    >>> class PrintEnvPlugin(Plugin):
+    ...     name = "print-env"
+    ...
+    ...     # no command line arg needed to activate plugin
+    ...     enabled = True
+    ...     def configure(self, options, conf):
+    ...         if not self.can_configure:
+    ...             return
+    ...         self.conf = conf
+    ...
+    ...     def options(self, parser, env={}):
+    ...         print "env:", env
+
+To test the argv, we use a config class that prints the argv it's
+given by nose.  We need to monkeypatch nose.config.Config, so that we
+can test the cases where that is used as the default.
+
+    >>> old_config = nose.config.Config
+    >>> class PrintArgvConfig(old_config):
+    ...
+    ...     def configure(self, argv=None, doc=None):
+    ...         print "argv:", argv
+    ...         old_config.configure(self, argv, doc)
+    >>> nose.config.Config = PrintArgvConfig
+
+The class under test, PluginTester, is designed to be used by
+subclassing.
+
+    >>> class Tester(PluginTester):
+    ...    activate = "-v"
+    ...    plugins = [PrintEnvPlugin(),
+    ...               FailureDetail(),
+    ...               Capture(),
+    ...               ]
+    ...
+    ...    def makeSuite(self):
+    ...        return unittest.TestSuite(tests=[])
+
+For the purposes of this test, we need a known ``os.environ`` and
+``sys.argv``.
+
+    >>> old_environ = os.environ
+    >>> old_argv = sys.argv
+    >>> os.environ = {"spam": "eggs"}
+    >>> sys.argv = ["spamtests"]
+
+PluginTester always uses [nosetests, self.activate] as its argv.
+If ``env`` is not overridden, the default is an empty ``env``.
+
+    >>> tester = Tester()
+    >>> tester.setUp()
+    argv: ['nosetests', '-v']
+    env: {}
+
+An empty ``env`` is respected...
+
+    >>> class EmptyEnvTester(Tester):
+    ...    env = {}
+    >>> tester = EmptyEnvTester()
+    >>> tester.setUp()
+    argv: ['nosetests', '-v']
+    env: {}
+
+... as is a non-empty ``env``.
+
+    >>> class NonEmptyEnvTester(Tester):
+    ...    env = {"foo": "bar"}
+    >>> tester = NonEmptyEnvTester()
+    >>> tester.setUp()
+    argv: ['nosetests', '-v']
+    env: {'foo': 'bar'}
+
+
+``nose.plugins.plugintest.run()`` should work analogously.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> run(suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+    >>> run(env={},
+    ...     suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+    >>> run(env={"foo": "bar"},
+    ...     suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {'foo': 'bar'}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+An explicit argv parameter is honoured:
+
+    >>> run(argv=["spam"],
+    ...     suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[PrintEnvPlugin()]) # doctest: +REPORT_NDIFF
+    argv: ['spam']
+    env: {}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+An explicit config parameter with an env is honoured:
+
+    >>> from nose.plugins.manager import PluginManager
+    >>> manager = PluginManager(plugins=[PrintEnvPlugin()])
+    >>> config = PrintArgvConfig(env={"foo": "bar"}, plugins=manager)
+    >>> run(config=config,
+    ...     suite=unittest.TestSuite(tests=[])) # doctest: +REPORT_NDIFF
+    argv: ['nosetests', '-v']
+    env: {'foo': 'bar'}
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+
+Clean up.
+
+    >>> os.environ = old_environ
+    >>> sys.argv = old_argv
+    >>> nose.config.Config = old_config
diff --git a/functional_tests/doc_tests/test_issue107/plugin_exceptions.rst b/functional_tests/doc_tests/test_issue107/plugin_exceptions.rst
new file mode 100644 (file)
index 0000000..2c595f0
--- /dev/null
@@ -0,0 +1,149 @@
+When Plugins Fail
+-----------------
+
+Plugin methods should not fail silently. When a plugin method raises
+an exception before or during the execution of a test, the exception
+will be wrapped in a :class:`nose.failure.Failure` instance and appear as a
+failing test. Exceptions raised at other times, such as in the
+preparation phase with ``prepareTestLoader`` or ``prepareTestResult``,
+or after a test executes, in ``afterTest`` will stop the entire test
+run.
+
+    >>> import os
+    >>> import sys
+    >>> from nose.plugins import Plugin
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+Our first test plugins take no command-line arguments and raise
+AttributeError in beforeTest and afterTest.
+
+    >>> class EnabledPlugin(Plugin):
+    ...     """Plugin that takes no command-line arguments"""
+    ...
+    ...     enabled = True
+    ...
+    ...     def configure(self, options, conf):
+    ...         pass
+    ...     def options(self, parser, env={}):
+    ...         pass    
+    >>> class FailBeforePlugin(EnabledPlugin):
+    ...     name = "fail-before"
+    ...            
+    ...     def beforeTest(self, test):
+    ...         raise AttributeError()    
+    >>> class FailAfterPlugin(EnabledPlugin):
+    ...     name = "fail-after"
+    ...            
+    ...     def afterTest(self, test):
+    ...         raise AttributeError()
+
+Running tests with the fail-before plugin enabled will result in all
+tests failing.
+
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> suitepath = os.path.join(support, 'test_spam.py')
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailBeforePlugin()])
+    EE
+    ======================================================================
+    ERROR: test_spam.test_spam
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError
+    <BLANKLINE>
+    ======================================================================
+    ERROR: test_spam.test_eggs
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AttributeError
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=2)
+
+But with the fail-after plugin, the entire test run will fail.
+
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailAfterPlugin()])
+    Traceback (most recent call last):
+    ...
+    AttributeError
+
+Likewise, since the next plugin fails in a preparatory method, outside
+of test execution, the entire test run fails when the plugin is used.
+
+    >>> class FailPreparationPlugin(EnabledPlugin):
+    ...     name = "fail-prepare"
+    ...     
+    ...     def prepareTestLoader(self, loader):
+    ...         raise TypeError("That loader is not my type")
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailPreparationPlugin()])
+    Traceback (most recent call last):
+    ...
+    TypeError: That loader is not my type
+
+
+Even AttributeErrors and TypeErrors are not silently suppressed as
+they used to be for some generative plugin methods (issue152).
+
+These methods caught TypeError and AttributeError and did not record
+the exception, before issue152 was fixed: .loadTestsFromDir(),
+.loadTestsFromModule(), .loadTestsFromTestCase(),
+loadTestsFromTestClass, and .makeTest().  Now, the exception is
+caught, but logged as a Failure.
+
+    >>> class FailLoadPlugin(EnabledPlugin):
+    ...     name = "fail-load"
+    ...     
+    ...     def loadTestsFromModule(self, module):
+    ...         # we're testing exception handling behaviour during
+    ...         # iteration, so be a generator function, without
+    ...         # actually yielding any tests
+    ...         if False:
+    ...             yield None
+    ...         raise TypeError("bug in plugin")
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailLoadPlugin()])
+    ..E
+    ======================================================================
+    ERROR: Failure: TypeError (bug in plugin)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    TypeError: bug in plugin
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=1)
+
+
+Also, before issue152 was resolved, .loadTestsFromFile() and
+.loadTestsFromName() didn't catch these errors at all, so the
+following test would crash nose:
+
+    >>> class FailLoadFromNamePlugin(EnabledPlugin):
+    ...     name = "fail-load-from-name"
+    ...     
+    ...     def loadTestsFromName(self, name, module=None, importPath=None):
+    ...         if False:
+    ...             yield None
+    ...         raise TypeError("bug in plugin")
+    >>> run(argv=['nosetests', suitepath],
+    ...     plugins=[FailLoadFromNamePlugin()])
+    E
+    ======================================================================
+    ERROR: Failure: TypeError (bug in plugin)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    TypeError: bug in plugin
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (errors=1)
diff --git a/functional_tests/doc_tests/test_issue107/support/test_spam$py.class b/functional_tests/doc_tests/test_issue107/support/test_spam$py.class
new file mode 100644 (file)
index 0000000..a06d9e9
Binary files /dev/null and b/functional_tests/doc_tests/test_issue107/support/test_spam$py.class differ
diff --git a/functional_tests/doc_tests/test_issue107/support/test_spam.py b/functional_tests/doc_tests/test_issue107/support/test_spam.py
new file mode 100644 (file)
index 0000000..4c1b8fb
--- /dev/null
@@ -0,0 +1,5 @@
+def test_spam():
+    assert True
+
+def test_eggs():
+    pass
diff --git a/functional_tests/doc_tests/test_issue107/support/test_spam.pyc b/functional_tests/doc_tests/test_issue107/support/test_spam.pyc
new file mode 100644 (file)
index 0000000..e1b44f6
Binary files /dev/null and b/functional_tests/doc_tests/test_issue107/support/test_spam.pyc differ
diff --git a/functional_tests/doc_tests/test_issue119/empty_plugin.rst b/functional_tests/doc_tests/test_issue119/empty_plugin.rst
new file mode 100644 (file)
index 0000000..6194c19
--- /dev/null
@@ -0,0 +1,57 @@
+Minimal plugin
+--------------
+
+Plugins work as long as they implement the minimal interface required
+by nose.plugins.base. They do not have to derive from
+nose.plugins.Plugin.
+
+    >>> class NullPlugin(object):
+    ...
+    ...     enabled = True
+    ...     name = "null"
+    ...     score = 100
+    ...
+    ...     def options(self, parser, env):
+    ...         pass
+    ...
+    ...     def configure(self, options, conf):
+    ...         pass
+    >>> import unittest
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> run(suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[NullPlugin()]) # doctest: +REPORT_NDIFF
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+Plugins can derive from nose.plugins.base and do nothing except set a
+name.
+
+    >>> import os
+    >>> from nose.plugins import Plugin
+    >>> class DerivedNullPlugin(Plugin):
+    ...
+    ...     name = "derived-null"
+
+Enabled plugin that's otherwise empty.
+
+    >>> class EnabledDerivedNullPlugin(Plugin):
+    ...
+    ...     enabled = True
+    ...     name = "enabled-derived-null"
+    ...
+    ...     def options(self, parser, env=os.environ):
+    ...         pass
+    ...
+    ...     def configure(self, options, conf):
+    ...         if not self.can_configure:
+    ...             return
+    ...         self.conf = conf
+    >>> run(suite=unittest.TestSuite(tests=[]),
+    ...     plugins=[DerivedNullPlugin(), EnabledDerivedNullPlugin()])
+    ...     # doctest: +REPORT_NDIFF
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
diff --git a/functional_tests/doc_tests/test_issue119/test_zeronine$py.class b/functional_tests/doc_tests/test_issue119/test_zeronine$py.class
new file mode 100644 (file)
index 0000000..8138d8c
Binary files /dev/null and b/functional_tests/doc_tests/test_issue119/test_zeronine$py.class differ
diff --git a/functional_tests/doc_tests/test_issue119/test_zeronine.py b/functional_tests/doc_tests/test_issue119/test_zeronine.py
new file mode 100644 (file)
index 0000000..6a4f450
--- /dev/null
@@ -0,0 +1,26 @@
+import os
+import unittest
+from nose.plugins import Plugin
+from nose.plugins.plugintest import PluginTester
+from nose.plugins.manager import ZeroNinePlugin
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+support = os.path.join(os.path.dirname(os.path.dirname(here)), 'support')
+
+
+class EmptyPlugin(Plugin):
+    pass
+
+class TestEmptyPlugin(PluginTester, unittest.TestCase):
+    activate = '--with-empty'
+    plugins = [ZeroNinePlugin(EmptyPlugin())]
+    suitepath = os.path.join(here, 'empty_plugin.rst')
+
+    def test_empty_zero_nine_does_not_crash(self):
+        print self.output
+        assert "'EmptyPlugin' object has no attribute 'loadTestsFromPath'" \
+            not in self.output
+
+    
+
diff --git a/functional_tests/doc_tests/test_issue119/test_zeronine.pyc b/functional_tests/doc_tests/test_issue119/test_zeronine.pyc
new file mode 100644 (file)
index 0000000..4e1e946
Binary files /dev/null and b/functional_tests/doc_tests/test_issue119/test_zeronine.pyc differ
diff --git a/functional_tests/doc_tests/test_issue142/errorclass_failure.rst b/functional_tests/doc_tests/test_issue142/errorclass_failure.rst
new file mode 100644 (file)
index 0000000..c4ce287
--- /dev/null
@@ -0,0 +1,124 @@
+Failure of Errorclasses
+-----------------------
+
+Errorclasses (skips, deprecations, etc.) define whether or not they
+represent test failures.
+
+    >>> import os
+    >>> import sys
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> from nose.plugins.skip import Skip
+    >>> from nose.plugins.deprecated import Deprecated
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> sys.path.insert(0, support)
+    >>> from errorclass_failure_plugin import Todo, TodoPlugin, \
+    ...                                       NonFailureTodoPlugin
+    >>> todo_test = os.path.join(support, 'errorclass_failing_test.py')
+    >>> misc_test = os.path.join(support, 'errorclass_tests.py')
+
+nose.plugins.errorclass.ErrorClass has an argument ``isfailure``. With a
+true isfailure, when the errorclass' exception is raised by a test,
+tracebacks are printed.
+
+    >>> run(argv=["nosetests", "-v", "--with-todo", todo_test],
+    ...     plugins=[TodoPlugin()])  # doctest: +REPORT_NDIFF
+    errorclass_failing_test.test_todo ... TODO: fix me
+    errorclass_failing_test.test_2 ... ok
+    <BLANKLINE>
+    ======================================================================
+    TODO: errorclass_failing_test.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    FAILED (TODO=1)
+
+
+Also, ``--stop`` stops the test run.
+
+    >>> run(argv=["nosetests", "-v", "--with-todo", "--stop", todo_test],
+    ...     plugins=[TodoPlugin()])  # doctest: +REPORT_NDIFF
+    errorclass_failing_test.test_todo ... TODO: fix me
+    <BLANKLINE>
+    ======================================================================
+    TODO: errorclass_failing_test.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (TODO=1)
+
+
+With a false .isfailure, errorclass exceptions raised by tests are
+treated as "ignored errors."  For ignored errors, tracebacks are not
+printed, and the test run does not stop.
+
+    >>> run(argv=["nosetests", "-v", "--with-non-failure-todo", "--stop",
+    ...           todo_test],
+    ...     plugins=[NonFailureTodoPlugin()])  # doctest: +REPORT_NDIFF
+    errorclass_failing_test.test_todo ... TODO: fix me
+    errorclass_failing_test.test_2 ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK (TODO=1)
+
+
+Exception detail strings of errorclass errors are always printed when
+-v is in effect, regardless of whether the error is ignored.  Note
+that exception detail strings may have more than one line.
+
+    >>> run(argv=["nosetests", "-v", "--with-todo", misc_test],
+    ...     plugins=[TodoPlugin(), Skip(), Deprecated()])
+    ... # doctest: +REPORT_NDIFF
+    errorclass_tests.test_todo ... TODO: fix me
+    errorclass_tests.test_2 ... ok
+    errorclass_tests.test_3 ... SKIP: skipety-skip
+    errorclass_tests.test_4 ... SKIP
+    errorclass_tests.test_5 ... DEPRECATED: spam
+    eggs
+    <BLANKLINE>
+    spam
+    errorclass_tests.test_6 ... DEPRECATED: spam
+    <BLANKLINE>
+    ======================================================================
+    TODO: errorclass_tests.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 6 tests in ...s
+    <BLANKLINE>
+    FAILED (DEPRECATED=2, SKIP=2, TODO=1)
+
+Without -v, the exception detail strings are only displayed if the
+error is not ignored (otherwise, there's no traceback).
+
+    >>> run(argv=["nosetests", "--with-todo", misc_test],
+    ...     plugins=[TodoPlugin(), Skip(), Deprecated()])
+    ... # doctest: +REPORT_NDIFF
+    T.SSDD
+    ======================================================================
+    TODO: errorclass_tests.test_todo
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    Todo: fix me
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 6 tests in ...s
+    <BLANKLINE>
+    FAILED (DEPRECATED=2, SKIP=2, TODO=1)
+
+>>> sys.path.remove(support)
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test$py.class b/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test$py.class
new file mode 100644 (file)
index 0000000..cf2de6b
Binary files /dev/null and b/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test$py.class differ
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.py b/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.py
new file mode 100644 (file)
index 0000000..fae3c75
--- /dev/null
@@ -0,0 +1,7 @@
+from errorclass_failure_plugin import Todo
+
+def test_todo():
+    raise Todo("fix me")
+
+def test_2():
+    pass
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.pyc b/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.pyc
new file mode 100644 (file)
index 0000000..47adf54
Binary files /dev/null and b/functional_tests/doc_tests/test_issue142/support/errorclass_failing_test.pyc differ
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class b/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class
new file mode 100644 (file)
index 0000000..a77f915
Binary files /dev/null and b/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin$py.class differ
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.py b/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.py
new file mode 100644 (file)
index 0000000..927c986
--- /dev/null
@@ -0,0 +1,16 @@
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+class Todo(Exception):
+    pass
+
+class TodoPlugin(ErrorClassPlugin):
+
+    name = "todo"
+
+    todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+class NonFailureTodoPlugin(ErrorClassPlugin):
+
+    name = "non-failure-todo"
+
+    todo = ErrorClass(Todo, label='TODO', isfailure=False)
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc b/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc
new file mode 100644 (file)
index 0000000..7d5ca37
Binary files /dev/null and b/functional_tests/doc_tests/test_issue142/support/errorclass_failure_plugin.pyc differ
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_tests$py.class b/functional_tests/doc_tests/test_issue142/support/errorclass_tests$py.class
new file mode 100644 (file)
index 0000000..9993086
Binary files /dev/null and b/functional_tests/doc_tests/test_issue142/support/errorclass_tests$py.class differ
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_tests.py b/functional_tests/doc_tests/test_issue142/support/errorclass_tests.py
new file mode 100644 (file)
index 0000000..4981224
--- /dev/null
@@ -0,0 +1,20 @@
+from errorclass_failure_plugin import Todo
+from nose import SkipTest, DeprecatedTest
+
+def test_todo():
+    raise Todo('fix me')
+
+def test_2():
+    pass
+
+def test_3():
+    raise SkipTest('skipety-skip')
+
+def test_4():
+    raise SkipTest()
+
+def test_5():
+    raise DeprecatedTest('spam\neggs\n\nspam')
+
+def test_6():
+    raise DeprecatedTest('spam')
diff --git a/functional_tests/doc_tests/test_issue142/support/errorclass_tests.pyc b/functional_tests/doc_tests/test_issue142/support/errorclass_tests.pyc
new file mode 100644 (file)
index 0000000..9e95e34
Binary files /dev/null and b/functional_tests/doc_tests/test_issue142/support/errorclass_tests.pyc differ
diff --git a/functional_tests/doc_tests/test_issue145/imported_tests.rst b/functional_tests/doc_tests/test_issue145/imported_tests.rst
new file mode 100644 (file)
index 0000000..c4eee78
--- /dev/null
@@ -0,0 +1,117 @@
+Importing Tests
+---------------
+
+When a package imports tests from another package, the tests are
+**completely** relocated into the importing package. This means that the
+fixtures from the source package are **not** run when the tests in the
+importing package are executed.
+
+For example, consider this collection of packages:
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> from nose.util import ls_tree
+    >>> print ls_tree(support) # doctest: +REPORT_NDIFF
+    |-- package1
+    |   |-- __init__.py
+    |   `-- test_module.py
+    |-- package2c
+    |   |-- __init__.py
+    |   `-- test_module.py
+    `-- package2f
+        |-- __init__.py
+        `-- test_module.py
+
+In these packages, the tests are all defined in package1, and are imported
+into package2f and package2c.
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+package1 has fixtures, which we can see by running all of the tests. Note
+below that the test names reflect the modules into which the tests are
+imported, not the source modules.
+
+    >>> argv = [__file__, '-v', support]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package1 setup
+    test (package1.test_module.TestCase) ... ok
+    package1.test_module.TestClass.test_class ... ok
+    package1.test_module.test_function ... ok
+    package2c setup
+    test (package2c.test_module.TestCase) ... ok
+    package2c.test_module.TestClass.test_class ... ok
+    package2f setup
+    package2f.test_module.test_function ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 6 tests in ...s
+    <BLANKLINE>
+    OK
+
+When tests are run in package2f or package2c, only the fixtures from those
+packages are executed.
+
+    >>> argv = [__file__, '-v', os.path.join(support, 'package2f')]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2f setup
+    package2f.test_module.test_function ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+    >>> argv = [__file__, '-v', os.path.join(support, 'package2c')]
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2c setup
+    test (package2c.test_module.TestCase) ... ok
+    package2c.test_module.TestClass.test_class ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
+
+This also applies when only the specific tests are selected via the
+command-line.
+
+    >>> argv = [__file__, '-v',
+    ...         os.path.join(support, 'package2c', 'test_module.py') +
+    ...         ':TestClass.test_class']
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2c setup
+    package2c.test_module.TestClass.test_class ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+    >>> argv = [__file__, '-v',
+    ...         os.path.join(support, 'package2c', 'test_module.py') +
+    ...         ':TestCase.test']
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2c setup
+    test (package2c.test_module.TestCase) ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+    >>> argv = [__file__, '-v',
+    ...         os.path.join(support, 'package2f', 'test_module.py') +
+    ...         ':test_function']
+    >>> run(argv=argv) # doctest: +REPORT_NDIFF
+    package2f setup
+    package2f.test_module.test_function ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
diff --git a/functional_tests/doc_tests/test_issue145/support/package1/__init__$py.class b/functional_tests/doc_tests/test_issue145/support/package1/__init__$py.class
new file mode 100644 (file)
index 0000000..1186513
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package1/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package1/__init__.py b/functional_tests/doc_tests/test_issue145/support/package1/__init__.py
new file mode 100644 (file)
index 0000000..c715fdb
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    print 'package1 setup'
diff --git a/functional_tests/doc_tests/test_issue145/support/package1/__init__.pyc b/functional_tests/doc_tests/test_issue145/support/package1/__init__.pyc
new file mode 100644 (file)
index 0000000..8000d71
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package1/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package1/test_module$py.class b/functional_tests/doc_tests/test_issue145/support/package1/test_module$py.class
new file mode 100644 (file)
index 0000000..2566ada
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package1/test_module$py.class differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package1/test_module.py b/functional_tests/doc_tests/test_issue145/support/package1/test_module.py
new file mode 100644 (file)
index 0000000..0c5ac78
--- /dev/null
@@ -0,0 +1,12 @@
+import unittest
+
+def test_function():
+    pass
+
+class TestClass:
+    def test_class(self):
+        pass
+
+class TestCase(unittest.TestCase):
+    def test(self):
+        pass
diff --git a/functional_tests/doc_tests/test_issue145/support/package1/test_module.pyc b/functional_tests/doc_tests/test_issue145/support/package1/test_module.pyc
new file mode 100644 (file)
index 0000000..9d033ce
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package1/test_module.pyc differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2c/__init__$py.class b/functional_tests/doc_tests/test_issue145/support/package2c/__init__$py.class
new file mode 100644 (file)
index 0000000..ddc721a
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2c/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2c/__init__.py b/functional_tests/doc_tests/test_issue145/support/package2c/__init__.py
new file mode 100644 (file)
index 0000000..106401f
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    print 'package2c setup'
diff --git a/functional_tests/doc_tests/test_issue145/support/package2c/__init__.pyc b/functional_tests/doc_tests/test_issue145/support/package2c/__init__.pyc
new file mode 100644 (file)
index 0000000..394ffd8
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2c/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2c/test_module$py.class b/functional_tests/doc_tests/test_issue145/support/package2c/test_module$py.class
new file mode 100644 (file)
index 0000000..4a98b21
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2c/test_module$py.class differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2c/test_module.py b/functional_tests/doc_tests/test_issue145/support/package2c/test_module.py
new file mode 100644 (file)
index 0000000..6affbf1
--- /dev/null
@@ -0,0 +1 @@
+from package1.test_module import TestClass, TestCase
diff --git a/functional_tests/doc_tests/test_issue145/support/package2c/test_module.pyc b/functional_tests/doc_tests/test_issue145/support/package2c/test_module.pyc
new file mode 100644 (file)
index 0000000..f231327
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2c/test_module.pyc differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2f/__init__$py.class b/functional_tests/doc_tests/test_issue145/support/package2f/__init__$py.class
new file mode 100644 (file)
index 0000000..704c4c7
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2f/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2f/__init__.py b/functional_tests/doc_tests/test_issue145/support/package2f/__init__.py
new file mode 100644 (file)
index 0000000..fc203eb
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    print 'package2f setup'
diff --git a/functional_tests/doc_tests/test_issue145/support/package2f/__init__.pyc b/functional_tests/doc_tests/test_issue145/support/package2f/__init__.pyc
new file mode 100644 (file)
index 0000000..28350da
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2f/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2f/test_module$py.class b/functional_tests/doc_tests/test_issue145/support/package2f/test_module$py.class
new file mode 100644 (file)
index 0000000..98feca7
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2f/test_module$py.class differ
diff --git a/functional_tests/doc_tests/test_issue145/support/package2f/test_module.py b/functional_tests/doc_tests/test_issue145/support/package2f/test_module.py
new file mode 100644 (file)
index 0000000..e353c62
--- /dev/null
@@ -0,0 +1 @@
+from package1.test_module import test_function
diff --git a/functional_tests/doc_tests/test_issue145/support/package2f/test_module.pyc b/functional_tests/doc_tests/test_issue145/support/package2f/test_module.pyc
new file mode 100644 (file)
index 0000000..a84068d
Binary files /dev/null and b/functional_tests/doc_tests/test_issue145/support/package2f/test_module.pyc differ
diff --git a/functional_tests/doc_tests/test_multiprocess/multiprocess.rst b/functional_tests/doc_tests/test_multiprocess/multiprocess.rst
new file mode 100644 (file)
index 0000000..d463ba0
--- /dev/null
@@ -0,0 +1,269 @@
+Parallel Testing with nose
+--------------------------
+
+.. Note ::
+
+   Use of the multiprocess plugin on python 2.5 or earlier requires
+   the multiprocessing_ module, available from PyPI and at
+   http://code.google.com/p/python-multiprocessing/.
+
+..
+
+Using the `nose.plugin.multiprocess` plugin, you can parallelize a
+test run across a configurable number of worker processes. While this can
+speed up CPU-bound test runs, it is mainly useful for IO-bound tests
+that spend most of their time waiting for data to arrive from someplace
+else and can benefit from parallelization.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process separately,
+and to have enough worker processes that the entire test run takes only as
+long as the slowest test. This ideal is not attainable in all cases, however,
+because many test suites depend on context (class, module or package)
+fixtures.
+
+Some context fixtures are re-entrant -- that is, they can be called many times
+concurrently. Other context fixtures can be shared among tests running in
+different processes. Still others must be run once and only once for a given
+set of tests, and must be in the same process as the tests themselves.
+
+The plugin can't know the difference between these types of context fixtures
+unless you tell it, so the default behavior is to dispatch the entire context
+suite to a worker as a unit. This way, the fixtures are run once, in the same
+process as the tests. (That, of course, is how they are run when the plugin
+is not active: All tests are run in a single process.)
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute multiple times, typically once per test, and concurrently.
+
+For example, a module that contains re-entrant fixtures might look like::
+
+  _multiprocess_can_split_ = True
+
+  def setup():
+      ...
+
+A class might look like::
+
+  class TestClass:
+      _multiprocess_can_split_ = True
+
+      @classmethod
+      def setup_class(cls):
+          ...
+
+Alternatively, if a context's fixtures may only be run once, or may not run
+concurrently, but *may* be shared by tests running in different processes
+-- for instance a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. Fixtures for contexts so marked will execute in the primary nose
+process, and tests in those contexts will be individually dispatched to run in
+parallel.
+
+A module with shareable fixtures might look like::
+
+  _multiprocess_shared_ = True
+
+  def setup():
+      ...
+
+A class might look like::
+
+  class TestClass:
+      _multiprocess_shared_ = True
+
+      @classmethod
+      def setup_class(cls):
+          ...
+
+These options are mutually exclusive: you can't mark a context as both
+splittable and shareable.
+
+Example
+~~~~~~~
+
+Consider three versions of the same test suite. One
+is marked ``_multiprocess_shared_``, another ``_multiprocess_can_split_``,
+and the third is unmarked. They all define the same fixtures:
+
+    called = []
+
+    def setup():
+        print "setup called"
+        called.append('setup')
+
+    def teardown():
+        print "teardown called"
+        called.append('teardown')
+
+And each has two tests that just test that ``setup()`` has been called
+once and only once.
+
+When run without the multiprocess plugin, fixtures for the shared,
+can-split and not-shared test suites execute at the same times, and
+all tests pass.
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> test_not_shared = os.path.join(support, 'test_not_shared.py')
+    >>> test_shared = os.path.join(support, 'test_shared.py')
+    >>> test_can_split = os.path.join(support, 'test_can_split.py')
+
+The module with shared fixtures passes.
+
+    >>> run(argv=['nosetests', '-v', test_shared]) #doctest: +REPORT_NDIFF
+    setup called
+    test_shared.TestMe.test_one ... ok
+    test_shared.test_a ... ok
+    test_shared.test_b ... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+As does the module with no fixture annotations.
+
+    >>> run(argv=['nosetests', '-v', test_not_shared]) #doctest: +REPORT_NDIFF
+    setup called
+    test_not_shared.TestMe.test_one ... ok
+    test_not_shared.test_a ... ok
+    test_not_shared.test_b ... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+And the module that marks its fixtures as re-entrant.
+
+    >>> run(argv=['nosetests', '-v', test_can_split]) #doctest: +REPORT_NDIFF
+    setup called
+    test_can_split.TestMe.test_one ... ok
+    test_can_split.test_a ... ok
+    test_can_split.test_b ... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+However, when run with the ``--processes=2`` switch, each test module
+behaves differently.
+
+    >>> from nose.plugins.multiprocess import MultiProcess
+
+The module marked ``_multiprocess_shared_`` executes correctly, although as with
+any use of the multiprocess plugin, the order in which the tests execute is
+indeterminate.
+
+First we have to reset all of the test modules.
+
+    >>> import sys
+    >>> sys.modules['test_not_shared'].called[:] = []
+    >>> sys.modules['test_can_split'].called[:] = []
+
+Then we can run the tests again with the multiprocess plugin active.
+    
+    >>> run(argv=['nosetests', '-v', '--processes=2', test_shared],
+    ...     plugins=[MultiProcess()]) #doctest: +ELLIPSIS
+    setup called
+    test_shared.... ok
+    teardown called
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    OK
+
+As does the one not marked -- however in this case, ``--processes=2``
+will do *nothing at all*: since the tests are in a module with
+unmarked fixtures, the entire test module will be dispatched to a
+single runner process.
+
+However, the module marked ``_multiprocess_can_split_`` will fail, since
+the fixtures *are not reentrant*. A module such as this *must not* be
+marked ``_multiprocess_can_split_``, or tests will fail in one or more
+runner processes as fixtures are re-executed.
+
+We have to reset all of the test modules again.
+
+    >>> import sys
+    >>> sys.modules['test_not_shared'].called[:] = []
+    >>> sys.modules['test_can_split'].called[:] = []
+
+Then we can run again and see the failures.
+
+    >>> run(argv=['nosetests', '-v', '--processes=2', test_can_split],
+    ...     plugins=[MultiProcess()]) #doctest: +ELLIPSIS
+    setup called
+    teardown called
+    test_can_split....
+    ...
+    FAILED (failures=...)
+
+Other differences in test running
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The main difference between using the multiprocess plugin and not doing so
+is obviously that tests run concurrently under multiprocess. However, there
+are a few other differences that may impact your test suite:
+
+* More tests may be found
+
+  Because tests are dispatched to worker processes by name, a worker
+  process may find and run tests in a module that would not be found during a
+  normal test run. For instance, if a non-test module contains a test-like
+  function, that function would be discovered as a test in a worker process
+  if the entire module is dispatched to the worker. This is because worker
+  processes load tests in *directed* mode -- the same way that nose loads
+  tests when you explicitly name a module -- rather than in *discovered* mode,
+  the mode nose uses when looking for tests in a directory.
+
+* Out-of-order output
+
+  Test results are collected by workers and returned to the master process for
+  output. Since different processes may complete their tests at different
+  times, test result output order is not determinate.
+
+* Plugin interaction warning
+
+  The multiprocess plugin does not work well with other plugins that expect to
+  wrap or gain control of the test-running process. Examples from nose's 
+  builtin plugins include coverage and profiling: a test run using
+  both multiprocess and either of those is likely to fail in some
+  confusing and spectacular way.
+
+* Python 2.6 warning
+
+  This is unlikely to impact you unless you are writing tests for nose itself,
+  but be aware that under python 2.6, the multiprocess plugin is not
+  re-entrant. For example, when running nose with the plugin active, you can't
+  use subprocess to launch another copy of nose that also uses the
+  multiprocess plugin. This is why this test is skipped under python 2.6 when
+  run with the ``--processes`` switch.
diff --git a/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures$py.class b/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures$py.class
new file mode 100644 (file)
index 0000000..c373f17
Binary files /dev/null and b/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures$py.class differ
diff --git a/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.py b/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.py
new file mode 100644 (file)
index 0000000..8b4c3af
--- /dev/null
@@ -0,0 +1,16 @@
+from nose.plugins.skip import SkipTest
+from nose.plugins.multiprocess import MultiProcess
+
+_multiprocess_can_split_ = True
+
+def setup_module():
+    try:
+        import multiprocessing
+        if 'active' in MultiProcess.status:
+            raise SkipTest("Multiprocess plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("multiprocessing module not available")
+
+
+
diff --git a/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.pyc b/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.pyc
new file mode 100644 (file)
index 0000000..a617865
Binary files /dev/null and b/functional_tests/doc_tests/test_multiprocess/multiprocess_fixtures.pyc differ
diff --git a/functional_tests/doc_tests/test_multiprocess/support/test_can_split.py b/functional_tests/doc_tests/test_multiprocess/support/test_can_split.py
new file mode 100644 (file)
index 0000000..a7ae6e7
--- /dev/null
@@ -0,0 +1,30 @@
+import sys
+called = []
+
+_multiprocess_can_split_ = 1
+
+def setup():
+    print >> sys.stderr, "setup called"
+    called.append('setup')
+
+
+def teardown():
+    print >> sys.stderr, "teardown called"
+    called.append('teardown')
+
+
+def test_a():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+def test_b():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+class TestMe:
+    def setup_class(cls):
+        cls._setup = True
+    setup_class = classmethod(setup_class)
+
+    def test_one(self):
+        assert self._setup, "Class was not set up"
diff --git a/functional_tests/doc_tests/test_multiprocess/support/test_can_split.pyc b/functional_tests/doc_tests/test_multiprocess/support/test_can_split.pyc
new file mode 100644 (file)
index 0000000..311493e
Binary files /dev/null and b/functional_tests/doc_tests/test_multiprocess/support/test_can_split.pyc differ
diff --git a/functional_tests/doc_tests/test_multiprocess/support/test_not_shared.py b/functional_tests/doc_tests/test_multiprocess/support/test_not_shared.py
new file mode 100644 (file)
index 0000000..8542109
--- /dev/null
@@ -0,0 +1,30 @@
+import sys
+called = []
+
+_multiprocess_ = 1
+
+def setup():
+    print >> sys.stderr, "setup called"
+    called.append('setup')
+
+
+def teardown():
+    print >> sys.stderr, "teardown called"
+    called.append('teardown')
+
+
+def test_a():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+def test_b():
+    assert len(called) == 1, "len(%s) !=1" % called
+
+
+class TestMe:
+    def setup_class(cls):
+        cls._setup = True
+    setup_class = classmethod(setup_class)
+
+    def test_one(self):
+        assert self._setup, "Class was not set up"
diff --git a/functional_tests/doc_tests/test_multiprocess/support/test_not_shared.pyc b/functional_tests/doc_tests/test_multiprocess/support/test_not_shared.pyc
new file mode 100644 (file)
index 0000000..5d8a9ee
Binary files /dev/null and b/functional_tests/doc_tests/test_multiprocess/support/test_not_shared.pyc differ
diff --git a/functional_tests/doc_tests/test_multiprocess/support/test_shared.py b/functional_tests/doc_tests/test_multiprocess/support/test_shared.py
new file mode 100644 (file)
index 0000000..d8617f8
--- /dev/null
@@ -0,0 +1,49 @@
+import os
+import sys
+
+here = os.path.dirname(__file__)
+flag = os.path.join(here, 'shared_flag')
+
+_multiprocess_shared_ = 1
+
+def _log(val):
+    ff = open(flag, 'a+')
+    ff.write(val)
+    ff.write("\n")
+    ff.close()
+
+
+def _clear():
+    if os.path.isfile(flag):
+        os.unlink(flag)
+
+        
+def logged():
+    return [line for line in open(flag, 'r')]
+
+
+def setup():
+    print >> sys.stderr, "setup called"
+    _log('setup')
+
+
+def teardown():
+    print >> sys.stderr, "teardown called"
+    _clear()
+
+    
+def test_a():
+    assert len(logged()) == 1, "len(%s) !=1" % called
+
+
+def test_b():
+    assert len(logged()) == 1, "len(%s) !=1" % called
+
+
+class TestMe:
+    def setup_class(cls):
+        cls._setup = True
+    setup_class = classmethod(setup_class)
+
+    def test_one(self):
+        assert self._setup, "Class was not set up"
diff --git a/functional_tests/doc_tests/test_multiprocess/support/test_shared.pyc b/functional_tests/doc_tests/test_multiprocess/support/test_shared.pyc
new file mode 100644 (file)
index 0000000..d48dca8
Binary files /dev/null and b/functional_tests/doc_tests/test_multiprocess/support/test_shared.pyc differ
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst b/functional_tests/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst
new file mode 100644 (file)
index 0000000..9513fdf
--- /dev/null
@@ -0,0 +1,89 @@
+Restricted Plugin Managers
+--------------------------
+
+In some cases, such as running under the ``python setup.py test`` command,
+nose is not able to use all available plugins. In those cases, a
+`nose.plugins.manager.RestrictedPluginManager` is used to exclude plugins that
+implement API methods that nose is unable to call.
+
+Support files for this test are in the support directory.
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+
+For this test, we'll use a simple plugin that implements the ``startTest``
+method.
+
+    >>> from nose.plugins.base import Plugin
+    >>> from nose.plugins.manager import RestrictedPluginManager
+    >>> class StartPlugin(Plugin):
+    ...     def startTest(self, test):
+    ...         print "started %s" % test
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+When run with a normal plugin manager, the plugin executes.
+
+    >>> argv = ['plugintest', '-v', '--with-startplugin', support]
+    >>> run(argv=argv, plugins=[StartPlugin()]) # doctest: +REPORT_NDIFF
+    started test.test
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+However, when run with a restricted plugin manager configured to exclude
+plugins implementing `startTest`, an exception is raised and nose exits.
+
+    >>> restricted = RestrictedPluginManager(
+    ...     plugins=[StartPlugin()], exclude=('startTest',), load=False)
+    >>> run(argv=argv, plugins=restricted) #doctest: +REPORT_NDIFF +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    SystemExit: ...
+
+Errors are only raised when options defined by excluded plugins are used.
+
+    >>> argv = ['plugintest', '-v', support]
+    >>> run(argv=argv, plugins=restricted) # doctest: +REPORT_NDIFF
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+When a disabled option appears in a configuration file, instead of on the
+command line, a warning is raised instead of an exception.
+
+    >>> argv = ['plugintest', '-v', '-c', os.path.join(support, 'start.cfg'),
+    ...         support]
+    >>> run(argv=argv, plugins=restricted) # doctest: +ELLIPSIS
+    RuntimeWarning: Option 'with-startplugin' in config file '...start.cfg' ignored: excluded by runtime environment
+    test.test ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    OK
+
+However, if an option appears in a configuration file that is not recognized
+either as an option defined by nose, or by an active or excluded plugin, an
+error is raised.
+
+    >>> argv = ['plugintest', '-v', '-c', os.path.join(support, 'bad.cfg'),
+    ...         support]
+    >>> run(argv=argv, plugins=restricted) # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ConfigError: Error reading config file '...bad.cfg': no such option 'with-meltedcheese'
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst.py3.patch b/functional_tests/doc_tests/test_restricted_plugin_options/restricted_plugin_options.rst.py3.patch
new file mode 100644 (file)
index 0000000..51a09b4
--- /dev/null
@@ -0,0 +1,9 @@
+--- restricted_plugin_options.rst.orig 2010-08-31 10:57:04.000000000 -0700
++++ restricted_plugin_options.rst      2010-08-31 10:57:51.000000000 -0700
+@@ -86,5 +86,5 @@
+     >>> run(argv=argv, plugins=restricted) # doctest: +ELLIPSIS
+     Traceback (most recent call last):
+     ...
+-    ConfigError: Error reading config file '...bad.cfg': no such option 'with-meltedcheese'
++    nose.config.ConfigError: Error reading config file '...bad.cfg': no such option 'with-meltedcheese'
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/support/bad.cfg b/functional_tests/doc_tests/test_restricted_plugin_options/support/bad.cfg
new file mode 100644 (file)
index 0000000..c050ec4
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+with-meltedcheese=1
\ No newline at end of file
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/support/start.cfg b/functional_tests/doc_tests/test_restricted_plugin_options/support/start.cfg
new file mode 100644 (file)
index 0000000..ea1e289
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+with-startplugin=1
\ No newline at end of file
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/support/test$py.class b/functional_tests/doc_tests/test_restricted_plugin_options/support/test$py.class
new file mode 100644 (file)
index 0000000..2336360
Binary files /dev/null and b/functional_tests/doc_tests/test_restricted_plugin_options/support/test$py.class differ
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/support/test.py b/functional_tests/doc_tests/test_restricted_plugin_options/support/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/functional_tests/doc_tests/test_restricted_plugin_options/support/test.pyc b/functional_tests/doc_tests/test_restricted_plugin_options/support/test.pyc
new file mode 100644 (file)
index 0000000..46c102a
Binary files /dev/null and b/functional_tests/doc_tests/test_restricted_plugin_options/support/test.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/selector_plugin.rst b/functional_tests/doc_tests/test_selector_plugin/selector_plugin.rst
new file mode 100644 (file)
index 0000000..f5f7913
--- /dev/null
@@ -0,0 +1,119 @@
+Using a Custom Selector
+-----------------------
+
+By default, nose uses a `nose.selector.Selector` instance to decide
+what is and is not a test. The default selector is fairly simple: for
+the most part, if an object's name matches the ``testMatch`` regular
+expression defined in the active `nose.config.Config` instance, the
+object is selected as a test. 
+
+This behavior is fine for new projects, but may be undesirable for
+older projects with a different test naming scheme. Fortunately, you
+can easily override this behavior by providing a custom selector using
+a plugin.
+
+    >>> import os
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+
+In this example, the project to be tested consists of a module and
+package and associated tests, laid out like this::
+
+    >>> from nose.util import ls_tree
+    >>> print ls_tree(support)
+    |-- mymodule.py
+    |-- mypackage
+    |   |-- __init__.py
+    |   |-- strings.py
+    |   `-- math
+    |       |-- __init__.py
+    |       `-- basic.py
+    `-- tests
+        |-- testlib.py
+        |-- math
+        |   `-- basic.py
+        |-- mymodule
+        |   `-- my_function.py
+        `-- strings
+            `-- cat.py
+
+Because the test modules do not include ``test`` in their names,
+nose's default selector is unable to discover this project's tests.
+
+.. Note ::
+
+   The run() function in :mod:`nose.plugins.plugintest` reformats test result
+   output to remove timings, which will vary from run to run, and
+   redirects the output to stdout.
+
+    >>> from nose.plugins.plugintest import run_buffered as run
+
+..
+
+    >>> argv = [__file__, '-v', support]
+    >>> run(argv=argv)
+    ----------------------------------------------------------------------
+    Ran 0 tests in ...s
+    <BLANKLINE>
+    OK
+
+The tests for the example project follow a few basic conventions:
+
+* They are all located under the tests/ directory.
+* Test modules are organized into groups under directories named for
+  the module or package they test.
+* testlib is *not* a test module, but it must be importable by the
+  test modules.
+* Test modules contain unittest.TestCase classes that are tests, and
+  may contain other functions or classes that are NOT tests, no matter
+  how they are named.
+
+We can codify those conventions in a selector class.
+
+    >>> from nose.selector import Selector
+    >>> import unittest
+    >>> class MySelector(Selector):
+    ...     def wantDirectory(self, dirname):
+    ...         # we want the tests directory and all directories
+    ...         # beneath it, and no others
+    ...         parts = dirname.split(os.path.sep)
+    ...         return 'tests' in parts
+    ...     def wantFile(self, filename):
+    ...         # we want python modules under tests/, except testlib
+    ...         parts = filename.split(os.path.sep)
+    ...         base, ext = os.path.splitext(parts[-1])
+    ...         return 'tests' in parts and ext == '.py' and base != 'testlib'
+    ...     def wantModule(self, module):
+    ...         # wantDirectory and wantFile above will ensure that
+    ...         # we never see an unwanted module
+    ...         return True
+    ...     def wantFunction(self, function):
+    ...         # never collect functions
+    ...         return False
+    ...     def wantClass(self, cls):
+    ...         # only collect TestCase subclasses
+    ...         return issubclass(cls, unittest.TestCase)
+
+To use our selector class, we need a plugin that can inject it into
+the test loader.
+
+    >>> from nose.plugins import Plugin
+    >>> class UseMySelector(Plugin):
+    ...     enabled = True
+    ...     def configure(self, options, conf):
+    ...         pass # always on
+    ...     def prepareTestLoader(self, loader):
+    ...         loader.selector = MySelector(loader.config)
+
+Now we can execute a test run using the custom selector, and the
+project's tests will be collected.
+
+    >>> run(argv=argv, plugins=[UseMySelector()])
+    test_add (basic.TestBasicMath) ... ok
+    test_sub (basic.TestBasicMath) ... ok
+    test_tuple_groups (my_function.MyFunction) ... ok
+    test_cat (cat.StringsCat) ... ok
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 4 tests in ...s
+    <BLANKLINE>
+    OK
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mymodule$py.class b/functional_tests/doc_tests/test_selector_plugin/support/mymodule$py.class
new file mode 100644 (file)
index 0000000..86c6620
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mymodule$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mymodule.py b/functional_tests/doc_tests/test_selector_plugin/support/mymodule.py
new file mode 100644 (file)
index 0000000..66b3c16
--- /dev/null
@@ -0,0 +1,2 @@
+def my_function(a, b, c):
+    return (a, (b, c))
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mymodule.pyc b/functional_tests/doc_tests/test_selector_plugin/support/mymodule.pyc
new file mode 100644 (file)
index 0000000..6320e75
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mymodule.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class
new file mode 100644 (file)
index 0000000..34dc2b3
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.py b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc
new file mode 100644 (file)
index 0000000..b8730b7
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class
new file mode 100644 (file)
index 0000000..f40b427
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.py b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.py
new file mode 100644 (file)
index 0000000..04e0659
--- /dev/null
@@ -0,0 +1 @@
+from mypackage.math.basic import *
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc
new file mode 100644 (file)
index 0000000..6eaebcd
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/__init__.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class
new file mode 100644 (file)
index 0000000..dca938c
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.py b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.py
new file mode 100644 (file)
index 0000000..6cddd28
--- /dev/null
@@ -0,0 +1,5 @@
+def add(a, b):
+    return a + b
+
+def sub(a, b):
+    return a - b
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc
new file mode 100644 (file)
index 0000000..9f434d6
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/math/basic.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings$py.class b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings$py.class
new file mode 100644 (file)
index 0000000..ba232ce
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.py b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.py
new file mode 100644 (file)
index 0000000..8ffc4cc
--- /dev/null
@@ -0,0 +1,2 @@
+def cat(a, b):
+    return "%s%s" % (a, b)
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.pyc b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.pyc
new file mode 100644 (file)
index 0000000..e18782f
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/mypackage/strings.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic$py.class b/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic$py.class
new file mode 100644 (file)
index 0000000..826ddf8
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.py b/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.py
new file mode 100644 (file)
index 0000000..7639ddc
--- /dev/null
@@ -0,0 +1,17 @@
+import testlib
+from mypackage import math
+
+
+class TestBasicMath(testlib.Base):
+
+    def test_add(self):
+        self.assertEqual(math.add(1, 2), 3)
+
+    def test_sub(self):
+        self.assertEqual(math.sub(3, 1), 2)
+
+
+class TestHelperClass:
+    def __init__(self):
+        raise Exception(
+            "This test helper class should not be collected")
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.pyc b/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.pyc
new file mode 100644 (file)
index 0000000..d29ab0b
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/math/basic.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class b/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class
new file mode 100644 (file)
index 0000000..fee09f7
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.py b/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.py
new file mode 100644 (file)
index 0000000..85808c9
--- /dev/null
@@ -0,0 +1,7 @@
+import mymodule
+import testlib
+
+class MyFunction(testlib.Base):
+
+    def test_tuple_groups(self):
+        self.assertEqual(mymodule.my_function(1, 2, 3), (1, (2, 3)))
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc b/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc
new file mode 100644 (file)
index 0000000..cb6a0b1
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/mymodule/my_function.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class b/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class
new file mode 100644 (file)
index 0000000..f01b978
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.py b/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.py
new file mode 100644 (file)
index 0000000..3b410e8
--- /dev/null
@@ -0,0 +1,12 @@
+import testlib
+from mypackage import strings
+
+class StringsCat(testlib.Base):
+
+    def test_cat(self):
+        self.assertEqual(strings.cat('one', 'two'), 'onetwo')
+
+
+def test_helper_function():
+    raise Exception(
+        "This test helper function should not be collected")
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc b/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc
new file mode 100644 (file)
index 0000000..80be4e7
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/strings/cat.pyc differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib$py.class b/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib$py.class
new file mode 100644 (file)
index 0000000..fe22fe2
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib$py.class differ
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.py b/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.py
new file mode 100644 (file)
index 0000000..92c4f96
--- /dev/null
@@ -0,0 +1,6 @@
+import unittest
+
+class Base(unittest.TestCase):
+    """Use this base class for all tests.
+    """
+    pass
diff --git a/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.pyc b/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.pyc
new file mode 100644 (file)
index 0000000..99f5b8e
Binary files /dev/null and b/functional_tests/doc_tests/test_selector_plugin/support/tests/testlib.pyc differ
diff --git a/functional_tests/doc_tests/test_xunit_plugin/support/nosetests.xml b/functional_tests/doc_tests/test_xunit_plugin/support/nosetests.xml
new file mode 100644 (file)
index 0000000..cb1ad4f
--- /dev/null
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?><testsuite name="nosetests" tests="4" errors="1" failures="1" skip="1"><testcase classname="test_skip" name="test_ok" time="0.002" /><testcase classname="test_skip" name="test_err" time="0.002"><error type="exceptions.Exception" message="oh no"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/nose/case.py", line 197, in runTest
+    self.test(*self.arg)
+  File "/private/tmp/nose_release_1.1.2/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py", line 7, in test_err
+    raise Exception("oh no")
+Exception: oh no
+]]></error></testcase><testcase classname="test_skip" name="test_fail" time="0.003"><failure type="exceptions.AssertionError" message="bye"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/nose/case.py", line 197, in runTest
+    self.test(*self.arg)
+  File "/private/tmp/nose_release_1.1.2/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py", line 10, in test_fail
+    assert False, "bye"
+AssertionError: bye
+]]></failure></testcase><testcase classname="test_skip" name="test_skip" time="0.002"><skipped type="nose.plugins.skip.SkipTest" message="not me"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/nose/case.py", line 197, in runTest
+    self.test(*self.arg)
+  File "/private/tmp/nose_release_1.1.2/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py", line 13, in test_skip
+    raise SkipTest("not me")
+SkipTest: not me
+]]></skipped></testcase></testsuite>
\ No newline at end of file
diff --git a/functional_tests/doc_tests/test_xunit_plugin/support/test_skip$py.class b/functional_tests/doc_tests/test_xunit_plugin/support/test_skip$py.class
new file mode 100644 (file)
index 0000000..7dc7e85
Binary files /dev/null and b/functional_tests/doc_tests/test_xunit_plugin/support/test_skip$py.class differ
diff --git a/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py b/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py
new file mode 100644 (file)
index 0000000..cb26c41
--- /dev/null
@@ -0,0 +1,13 @@
+from nose.exc import SkipTest
+
+def test_ok():
+    pass
+
+def test_err():
+    raise Exception("oh no")
+
+def test_fail():
+    assert False, "bye"
+
+def test_skip():
+    raise SkipTest("not me")
diff --git a/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.pyc b/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.pyc
new file mode 100644 (file)
index 0000000..2cf730b
Binary files /dev/null and b/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.pyc differ
diff --git a/functional_tests/doc_tests/test_xunit_plugin/test_skips.rst b/functional_tests/doc_tests/test_xunit_plugin/test_skips.rst
new file mode 100644 (file)
index 0000000..c0c3fbc
--- /dev/null
@@ -0,0 +1,40 @@
+XUnit output supports skips
+---------------------------
+
+>>> import os
+>>> from nose.plugins.xunit import Xunit
+>>> from nose.plugins.skip import SkipTest, Skip
+>>> support = os.path.join(os.path.dirname(__file__), 'support')
+>>> outfile = os.path.join(support, 'nosetests.xml')
+>>> from nose.plugins.plugintest import run_buffered as run
+>>> argv = [__file__, '-v', '--with-xunit', support,
+...         '--xunit-file=%s' % outfile]
+>>> run(argv=argv, plugins=[Xunit(), Skip()]) # doctest: +ELLIPSIS
+test_skip.test_ok ... ok
+test_skip.test_err ... ERROR
+test_skip.test_fail ... FAIL
+test_skip.test_skip ... SKIP: not me
+<BLANKLINE>
+======================================================================
+ERROR: test_skip.test_err
+----------------------------------------------------------------------
+Traceback (most recent call last):
+...
+Exception: oh no
+<BLANKLINE>
+======================================================================
+FAIL: test_skip.test_fail
+----------------------------------------------------------------------
+Traceback (most recent call last):
+...
+AssertionError: bye
+<BLANKLINE>
+----------------------------------------------------------------------
+XML: ...nosetests.xml
+----------------------------------------------------------------------
+Ran 4 tests in ...s
+<BLANKLINE>
+FAILED (SKIP=1, errors=1, failures=1)
+
+>>> open(outfile, 'r').read() # doctest: +ELLIPSIS
+'<?xml version="1.0" encoding="UTF-8"?><testsuite name="nosetests" tests="4" errors="1" failures="1" skip="1"><testcase classname="test_skip" name="test_ok" time="..." /><testcase classname="test_skip" name="test_err" time="..."><error type="...Exception" message="oh no">...</error></testcase><testcase classname="test_skip" name="test_fail" time="..."><failure type="...AssertionError" message="bye">...</failure></testcase><testcase classname="test_skip" name="test_skip" time="..."><skipped type="...SkipTest" message="not me">...</skipped></testcase></testsuite>'
diff --git a/functional_tests/support/att/test_attr$py.class b/functional_tests/support/att/test_attr$py.class
new file mode 100644 (file)
index 0000000..ab941f6
Binary files /dev/null and b/functional_tests/support/att/test_attr$py.class differ
diff --git a/functional_tests/support/att/test_attr.py b/functional_tests/support/att/test_attr.py
new file mode 100644 (file)
index 0000000..dd2f292
--- /dev/null
@@ -0,0 +1,96 @@
+from nose.plugins.attrib import attr
+import unittest
+
+def test_one():
+    pass
+test_one.a = 1
+test_one.d = [1, 2]
+
+
+def test_two():
+    pass
+test_two.a = 1
+test_two.c = 20
+test_two.d = [2, 3]
+
+def test_three():
+    pass
+test_three.b = 1
+test_three.d = [1, 3]
+
+class TestClass:
+    a = 1
+    def test_class_one(self):
+        pass
+
+    def test_class_two(self):
+        pass
+    test_class_two.b = 2
+
+    def test_class_three(self):
+        pass
+
+    
+class Something(unittest.TestCase):
+    b = 2
+    def test_case_one(self):
+        pass
+    
+    def test_case_two(self):
+        pass
+    test_case_two.c = 50
+    
+    def test_case_three(self):
+        pass
+
+
+class Superclass:
+    def test_method(self):
+        pass
+    test_method.from_super = True
+
+class TestSubclass(Superclass):
+    pass
+
+
+class Static:
+    def test_with_static(self):
+        pass
+    test_with_static.with_static = True
+
+    def static(self):
+        pass
+    static = staticmethod(static)
+
+
+class TestClassAndMethodAttrs(unittest.TestCase):
+    def test_method(self):
+        pass
+    test_method.meth_attr = 'method'
+TestClassAndMethodAttrs.cls_attr = 'class'
+
+
+class TestAttrClass:
+    from_super = True
+
+    def ends_with_test(self):
+        pass
+
+    def test_one(self):
+        pass
+
+    def test_two(self):
+        pass
+    test_two.from_super = False
+
+TestAttrClass = attr('a')(TestAttrClass)
+
+
+class TestAttrSubClass(TestAttrClass):
+    def test_sub_three(self):
+        pass
+
+def added_later_test(self):
+    pass
+
+TestAttrSubClass.added_later_test = added_later_test
diff --git a/functional_tests/support/att/test_attr.pyc b/functional_tests/support/att/test_attr.pyc
new file mode 100644 (file)
index 0000000..61806b7
Binary files /dev/null and b/functional_tests/support/att/test_attr.pyc differ
diff --git a/functional_tests/support/ctx/mod_import_skip$py.class b/functional_tests/support/ctx/mod_import_skip$py.class
new file mode 100644 (file)
index 0000000..d4efb25
Binary files /dev/null and b/functional_tests/support/ctx/mod_import_skip$py.class differ
diff --git a/functional_tests/support/ctx/mod_import_skip.py b/functional_tests/support/ctx/mod_import_skip.py
new file mode 100644 (file)
index 0000000..3a5af10
--- /dev/null
@@ -0,0 +1,9 @@
+from nose import SkipTest
+
+raise SkipTest("Don't run me")
+
+def test():
+    assert False, "Should not be run"
+
+def test2():
+    assert False, "Should not be run"
diff --git a/functional_tests/support/ctx/mod_import_skip.pyc b/functional_tests/support/ctx/mod_import_skip.pyc
new file mode 100644 (file)
index 0000000..90721fa
Binary files /dev/null and b/functional_tests/support/ctx/mod_import_skip.pyc differ
diff --git a/functional_tests/support/ctx/mod_setup_fails$py.class b/functional_tests/support/ctx/mod_setup_fails$py.class
new file mode 100644 (file)
index 0000000..37b3959
Binary files /dev/null and b/functional_tests/support/ctx/mod_setup_fails$py.class differ
diff --git a/functional_tests/support/ctx/mod_setup_fails.py b/functional_tests/support/ctx/mod_setup_fails.py
new file mode 100644 (file)
index 0000000..d7f49bc
--- /dev/null
@@ -0,0 +1,12 @@
+def setup():
+    assert False, "Failure in mod setup"
+
+
+def test_a():
+    raise AssertionError("test_a should not run")
+
+
+def test_b():
+    raise AssertionError("test_b should not run")
+
+
diff --git a/functional_tests/support/ctx/mod_setup_fails.pyc b/functional_tests/support/ctx/mod_setup_fails.pyc
new file mode 100644 (file)
index 0000000..f3638b8
Binary files /dev/null and b/functional_tests/support/ctx/mod_setup_fails.pyc differ
diff --git a/functional_tests/support/ctx/mod_setup_skip$py.class b/functional_tests/support/ctx/mod_setup_skip$py.class
new file mode 100644 (file)
index 0000000..6cc946d
Binary files /dev/null and b/functional_tests/support/ctx/mod_setup_skip$py.class differ
diff --git a/functional_tests/support/ctx/mod_setup_skip.py b/functional_tests/support/ctx/mod_setup_skip.py
new file mode 100644 (file)
index 0000000..6e5ec65
--- /dev/null
@@ -0,0 +1,14 @@
+from nose import SkipTest
+
+def setup():
+    raise SkipTest("no thanks")
+
+
+def test_a():
+    raise AssertionError("test_a should not run")
+
+
+def test_b():
+    raise AssertionError("test_b should not run")
+
+
diff --git a/functional_tests/support/ctx/mod_setup_skip.pyc b/functional_tests/support/ctx/mod_setup_skip.pyc
new file mode 100644 (file)
index 0000000..2e63475
Binary files /dev/null and b/functional_tests/support/ctx/mod_setup_skip.pyc differ
diff --git a/functional_tests/support/dir1/mod$py.class b/functional_tests/support/dir1/mod$py.class
new file mode 100644 (file)
index 0000000..1cce5cc
Binary files /dev/null and b/functional_tests/support/dir1/mod$py.class differ
diff --git a/functional_tests/support/dir1/mod.py b/functional_tests/support/dir1/mod.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir1/mod.pyc b/functional_tests/support/dir1/mod.pyc
new file mode 100644 (file)
index 0000000..f30d63e
Binary files /dev/null and b/functional_tests/support/dir1/mod.pyc differ
diff --git a/functional_tests/support/dir1/pak/__init__$py.class b/functional_tests/support/dir1/pak/__init__$py.class
new file mode 100644 (file)
index 0000000..3a1e179
Binary files /dev/null and b/functional_tests/support/dir1/pak/__init__$py.class differ
diff --git a/functional_tests/support/dir1/pak/__init__.py b/functional_tests/support/dir1/pak/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir1/pak/__init__.pyc b/functional_tests/support/dir1/pak/__init__.pyc
new file mode 100644 (file)
index 0000000..9057c2f
Binary files /dev/null and b/functional_tests/support/dir1/pak/__init__.pyc differ
diff --git a/functional_tests/support/dir1/pak/mod$py.class b/functional_tests/support/dir1/pak/mod$py.class
new file mode 100644 (file)
index 0000000..b0fdc88
Binary files /dev/null and b/functional_tests/support/dir1/pak/mod$py.class differ
diff --git a/functional_tests/support/dir1/pak/mod.py b/functional_tests/support/dir1/pak/mod.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir1/pak/mod.pyc b/functional_tests/support/dir1/pak/mod.pyc
new file mode 100644 (file)
index 0000000..05fb3ad
Binary files /dev/null and b/functional_tests/support/dir1/pak/mod.pyc differ
diff --git a/functional_tests/support/dir1/pak/sub/__init__$py.class b/functional_tests/support/dir1/pak/sub/__init__$py.class
new file mode 100644 (file)
index 0000000..1371333
Binary files /dev/null and b/functional_tests/support/dir1/pak/sub/__init__$py.class differ
diff --git a/functional_tests/support/dir1/pak/sub/__init__.py b/functional_tests/support/dir1/pak/sub/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir1/pak/sub/__init__.pyc b/functional_tests/support/dir1/pak/sub/__init__.pyc
new file mode 100644 (file)
index 0000000..5b550b6
Binary files /dev/null and b/functional_tests/support/dir1/pak/sub/__init__.pyc differ
diff --git a/functional_tests/support/dir2/mod$py.class b/functional_tests/support/dir2/mod$py.class
new file mode 100644 (file)
index 0000000..1243c9c
Binary files /dev/null and b/functional_tests/support/dir2/mod$py.class differ
diff --git a/functional_tests/support/dir2/mod.py b/functional_tests/support/dir2/mod.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir2/mod.pyc b/functional_tests/support/dir2/mod.pyc
new file mode 100644 (file)
index 0000000..bd37634
Binary files /dev/null and b/functional_tests/support/dir2/mod.pyc differ
diff --git a/functional_tests/support/dir2/pak/__init__$py.class b/functional_tests/support/dir2/pak/__init__$py.class
new file mode 100644 (file)
index 0000000..1f76922
Binary files /dev/null and b/functional_tests/support/dir2/pak/__init__$py.class differ
diff --git a/functional_tests/support/dir2/pak/__init__.py b/functional_tests/support/dir2/pak/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir2/pak/__init__.pyc b/functional_tests/support/dir2/pak/__init__.pyc
new file mode 100644 (file)
index 0000000..9680c7c
Binary files /dev/null and b/functional_tests/support/dir2/pak/__init__.pyc differ
diff --git a/functional_tests/support/dir2/pak/mod$py.class b/functional_tests/support/dir2/pak/mod$py.class
new file mode 100644 (file)
index 0000000..cf9e517
Binary files /dev/null and b/functional_tests/support/dir2/pak/mod$py.class differ
diff --git a/functional_tests/support/dir2/pak/mod.py b/functional_tests/support/dir2/pak/mod.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir2/pak/mod.pyc b/functional_tests/support/dir2/pak/mod.pyc
new file mode 100644 (file)
index 0000000..24c8bf7
Binary files /dev/null and b/functional_tests/support/dir2/pak/mod.pyc differ
diff --git a/functional_tests/support/dir2/pak/sub/__init__$py.class b/functional_tests/support/dir2/pak/sub/__init__$py.class
new file mode 100644 (file)
index 0000000..aeedc56
Binary files /dev/null and b/functional_tests/support/dir2/pak/sub/__init__$py.class differ
diff --git a/functional_tests/support/dir2/pak/sub/__init__.py b/functional_tests/support/dir2/pak/sub/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/dir2/pak/sub/__init__.pyc b/functional_tests/support/dir2/pak/sub/__init__.pyc
new file mode 100644 (file)
index 0000000..cf84d0c
Binary files /dev/null and b/functional_tests/support/dir2/pak/sub/__init__.pyc differ
diff --git a/functional_tests/support/dtt/docs/doc.txt b/functional_tests/support/dtt/docs/doc.txt
new file mode 100644 (file)
index 0000000..4cf3955
--- /dev/null
@@ -0,0 +1,6 @@
+This document is a doctest.
+
+    >>> 1 + 1
+    2
+
+That's all
diff --git a/functional_tests/support/dtt/docs/errdoc.txt b/functional_tests/support/dtt/docs/errdoc.txt
new file mode 100644 (file)
index 0000000..a947b52
--- /dev/null
@@ -0,0 +1,7 @@
+This document contains an invalid doctest.
+
+    >>> def foo():
+    >>> def bar():
+    ...     pass
+
+That is all.
diff --git a/functional_tests/support/dtt/docs/nodoc.txt b/functional_tests/support/dtt/docs/nodoc.txt
new file mode 100644 (file)
index 0000000..665f935
--- /dev/null
@@ -0,0 +1 @@
+This document contains no doctests.
diff --git a/functional_tests/support/dtt/some_mod$py.class b/functional_tests/support/dtt/some_mod$py.class
new file mode 100644 (file)
index 0000000..51dd346
Binary files /dev/null and b/functional_tests/support/dtt/some_mod$py.class differ
diff --git a/functional_tests/support/dtt/some_mod.py b/functional_tests/support/dtt/some_mod.py
new file mode 100644 (file)
index 0000000..23880d5
--- /dev/null
@@ -0,0 +1,17 @@
+"""
+Top level
+---------
+
+Let's run a test.
+
+    >>> foo(1)
+    2
+    >>> 2 + 2
+    4
+"""
+def foo(a):
+    """
+    >>> foo(2)
+    3
+    """
+    return a + 1
diff --git a/functional_tests/support/dtt/some_mod.pyc b/functional_tests/support/dtt/some_mod.pyc
new file mode 100644 (file)
index 0000000..081a3f6
Binary files /dev/null and b/functional_tests/support/dtt/some_mod.pyc differ
diff --git a/functional_tests/support/empty/.hidden b/functional_tests/support/empty/.hidden
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/functional_tests/support/ep/Some_plugin.egg-info/PKG-INFO b/functional_tests/support/ep/Some_plugin.egg-info/PKG-INFO
new file mode 100644 (file)
index 0000000..63420aa
--- /dev/null
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: Some-plugin
+Version: 0.0.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/functional_tests/support/ep/Some_plugin.egg-info/SOURCES.txt b/functional_tests/support/ep/Some_plugin.egg-info/SOURCES.txt
new file mode 100644 (file)
index 0000000..69e3e80
--- /dev/null
@@ -0,0 +1,6 @@
+setup.py
+Some_plugin.egg-info/PKG-INFO
+Some_plugin.egg-info/SOURCES.txt
+Some_plugin.egg-info/dependency_links.txt
+Some_plugin.egg-info/entry_points.txt
+Some_plugin.egg-info/top_level.txt
diff --git a/functional_tests/support/ep/Some_plugin.egg-info/dependency_links.txt b/functional_tests/support/ep/Some_plugin.egg-info/dependency_links.txt
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/functional_tests/support/ep/Some_plugin.egg-info/entry_points.txt b/functional_tests/support/ep/Some_plugin.egg-info/entry_points.txt
new file mode 100644 (file)
index 0000000..3afd2db
--- /dev/null
@@ -0,0 +1,3 @@
+[nose.plugins.0.10]
+someplugin = someplugin:SomePlugin
+
diff --git a/functional_tests/support/ep/Some_plugin.egg-info/top_level.txt b/functional_tests/support/ep/Some_plugin.egg-info/top_level.txt
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/functional_tests/support/ep/setup.py b/functional_tests/support/ep/setup.py
new file mode 100644 (file)
index 0000000..e751d18
--- /dev/null
@@ -0,0 +1,10 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='Some plugin',
+    packages = find_packages(),
+    entry_points = {
+    'nose.plugins.0.10': [
+    'someplugin = someplugin:SomePlugin'
+    ]
+    })
diff --git a/functional_tests/support/ep/someplugin.py b/functional_tests/support/ep/someplugin.py
new file mode 100644 (file)
index 0000000..3442a2d
--- /dev/null
@@ -0,0 +1,4 @@
+from nose.plugins import Plugin
+
+class SomePlugin(Plugin):
+    pass
diff --git a/functional_tests/support/fdp/test_fdp$py.class b/functional_tests/support/fdp/test_fdp$py.class
new file mode 100644 (file)
index 0000000..5dd1384
Binary files /dev/null and b/functional_tests/support/fdp/test_fdp$py.class differ
diff --git a/functional_tests/support/fdp/test_fdp.py b/functional_tests/support/fdp/test_fdp.py
new file mode 100644 (file)
index 0000000..bcaab0d
--- /dev/null
@@ -0,0 +1,10 @@
+def test_err():
+    raise TypeError("I can't type")
+
+def test_fail():
+    print "Hello"
+    a = 2
+    assert a == 4, "a is not 4"
+
+def test_ok():
+    pass
diff --git a/functional_tests/support/fdp/test_fdp.pyc b/functional_tests/support/fdp/test_fdp.pyc
new file mode 100644 (file)
index 0000000..29dba79
Binary files /dev/null and b/functional_tests/support/fdp/test_fdp.pyc differ
diff --git a/functional_tests/support/fdp/test_fdp_no_capt$py.class b/functional_tests/support/fdp/test_fdp_no_capt$py.class
new file mode 100644 (file)
index 0000000..e0bef8e
Binary files /dev/null and b/functional_tests/support/fdp/test_fdp_no_capt$py.class differ
diff --git a/functional_tests/support/fdp/test_fdp_no_capt.py b/functional_tests/support/fdp/test_fdp_no_capt.py
new file mode 100644 (file)
index 0000000..30b7b99
--- /dev/null
@@ -0,0 +1,9 @@
+def test_err():
+    raise TypeError("I can't type")
+
+def test_fail():
+    a = 2
+    assert a == 4, "a is not 4"
+
+def test_ok():
+    pass
diff --git a/functional_tests/support/fdp/test_fdp_no_capt.pyc b/functional_tests/support/fdp/test_fdp_no_capt.pyc
new file mode 100644 (file)
index 0000000..17fa25c
Binary files /dev/null and b/functional_tests/support/fdp/test_fdp_no_capt.pyc differ
diff --git a/functional_tests/support/gen/test$py.class b/functional_tests/support/gen/test$py.class
new file mode 100644 (file)
index 0000000..c161fcc
Binary files /dev/null and b/functional_tests/support/gen/test$py.class differ
diff --git a/functional_tests/support/gen/test.py b/functional_tests/support/gen/test.py
new file mode 100644 (file)
index 0000000..13a0c3e
--- /dev/null
@@ -0,0 +1,12 @@
+"""This test will fail if generators bind too early."""
+    
+from nose.tools import eq_
+
+def test1():
+
+    def func(_l, _n):
+        eq_(len(_l), _n)
+    l = []
+    for i in xrange(5):
+        yield func, l, i
+        l.append(None)
diff --git a/functional_tests/support/gen/test.pyc b/functional_tests/support/gen/test.pyc
new file mode 100644 (file)
index 0000000..626a58a
Binary files /dev/null and b/functional_tests/support/gen/test.pyc differ
diff --git a/functional_tests/support/id_fails/test_a$py.class b/functional_tests/support/id_fails/test_a$py.class
new file mode 100644 (file)
index 0000000..496e29a
Binary files /dev/null and b/functional_tests/support/id_fails/test_a$py.class differ
diff --git a/functional_tests/support/id_fails/test_a.py b/functional_tests/support/id_fails/test_a.py
new file mode 100644 (file)
index 0000000..8eac49a
--- /dev/null
@@ -0,0 +1 @@
+import apackagethatdoesntexist
diff --git a/functional_tests/support/id_fails/test_a.pyc b/functional_tests/support/id_fails/test_a.pyc
new file mode 100644 (file)
index 0000000..e9f3d42
Binary files /dev/null and b/functional_tests/support/id_fails/test_a.pyc differ
diff --git a/functional_tests/support/id_fails/test_b$py.class b/functional_tests/support/id_fails/test_b$py.class
new file mode 100644 (file)
index 0000000..fdb7b52
Binary files /dev/null and b/functional_tests/support/id_fails/test_b$py.class differ
diff --git a/functional_tests/support/id_fails/test_b.py b/functional_tests/support/id_fails/test_b.py
new file mode 100644 (file)
index 0000000..8999edc
--- /dev/null
@@ -0,0 +1,5 @@
+def test():
+    pass
+
+def test_fail():
+    assert False
diff --git a/functional_tests/support/id_fails/test_b.pyc b/functional_tests/support/id_fails/test_b.pyc
new file mode 100644 (file)
index 0000000..2d8b147
Binary files /dev/null and b/functional_tests/support/id_fails/test_b.pyc differ
diff --git a/functional_tests/support/idp/exm$py.class b/functional_tests/support/idp/exm$py.class
new file mode 100644 (file)
index 0000000..936f119
Binary files /dev/null and b/functional_tests/support/idp/exm$py.class differ
diff --git a/functional_tests/support/idp/exm.py b/functional_tests/support/idp/exm.py
new file mode 100644 (file)
index 0000000..a5ae6e2
--- /dev/null
@@ -0,0 +1,21 @@
+"""
+Module-level doctest
+
+    >>> 1
+    1
+    >>> add_one(_)
+    2
+"""
+
+def add_one(i):
+    """
+    function doctest
+
+    >>> add_one(1)
+    2
+    >>> add_one(2)
+    3
+    >>> add_one('steve')
+    joe    
+    """
+    return i + 1
diff --git a/functional_tests/support/idp/exm.pyc b/functional_tests/support/idp/exm.pyc
new file mode 100644 (file)
index 0000000..f7e9fb7
Binary files /dev/null and b/functional_tests/support/idp/exm.pyc differ
diff --git a/functional_tests/support/idp/tests$py.class b/functional_tests/support/idp/tests$py.class
new file mode 100644 (file)
index 0000000..0f643c0
Binary files /dev/null and b/functional_tests/support/idp/tests$py.class differ
diff --git a/functional_tests/support/idp/tests.py b/functional_tests/support/idp/tests.py
new file mode 100644 (file)
index 0000000..fc58278
--- /dev/null
@@ -0,0 +1,38 @@
+import unittest
+
+def test_a():
+    pass
+
+def test_b():
+    raise TypeError("I am typeless")
+
+def test_c():
+    assert False, "I am contrary"
+
+def test_gen():
+    def tryit(i):
+        pass
+    
+    for i in range(0, 4):
+        yield tryit, i
+
+
+class TestCase(unittest.TestCase):
+    def test_a(self):
+        pass
+    def test_b(self):
+        pass
+
+
+class TestCls:
+    def test_a(self):
+        pass
+
+    def test_gen(self):
+        def tryit(i):
+            pass
+        for i in range(0, 4):
+            yield tryit, i
+
+    def test_z(self):
+        pass
diff --git a/functional_tests/support/idp/tests.pyc b/functional_tests/support/idp/tests.pyc
new file mode 100644 (file)
index 0000000..e08e562
Binary files /dev/null and b/functional_tests/support/idp/tests.pyc differ
diff --git a/functional_tests/support/ipt/test1/ipthelp$py.class b/functional_tests/support/ipt/test1/ipthelp$py.class
new file mode 100644 (file)
index 0000000..92ac701
Binary files /dev/null and b/functional_tests/support/ipt/test1/ipthelp$py.class differ
diff --git a/functional_tests/support/ipt/test1/ipthelp.py b/functional_tests/support/ipt/test1/ipthelp.py
new file mode 100644 (file)
index 0000000..fdbe2e4
--- /dev/null
@@ -0,0 +1,4 @@
+print "1help imported"
+def help():
+    print "1help called"
+    pass
diff --git a/functional_tests/support/ipt/test1/ipthelp.pyc b/functional_tests/support/ipt/test1/ipthelp.pyc
new file mode 100644 (file)
index 0000000..2ca4333
Binary files /dev/null and b/functional_tests/support/ipt/test1/ipthelp.pyc differ
diff --git a/functional_tests/support/ipt/test1/tests$py.class b/functional_tests/support/ipt/test1/tests$py.class
new file mode 100644 (file)
index 0000000..a5fa254
Binary files /dev/null and b/functional_tests/support/ipt/test1/tests$py.class differ
diff --git a/functional_tests/support/ipt/test1/tests.py b/functional_tests/support/ipt/test1/tests.py
new file mode 100644 (file)
index 0000000..a9595f7
--- /dev/null
@@ -0,0 +1,7 @@
+import sys
+print 'ipthelp', sys.modules.get('ipthelp')
+import ipthelp
+print ipthelp
+
+def test1():
+    ipthelp.help()
diff --git a/functional_tests/support/ipt/test1/tests.pyc b/functional_tests/support/ipt/test1/tests.pyc
new file mode 100644 (file)
index 0000000..a00c8cf
Binary files /dev/null and b/functional_tests/support/ipt/test1/tests.pyc differ
diff --git a/functional_tests/support/ipt/test2/ipthelp$py.class b/functional_tests/support/ipt/test2/ipthelp$py.class
new file mode 100644 (file)
index 0000000..1e16aab
Binary files /dev/null and b/functional_tests/support/ipt/test2/ipthelp$py.class differ
diff --git a/functional_tests/support/ipt/test2/ipthelp.py b/functional_tests/support/ipt/test2/ipthelp.py
new file mode 100644 (file)
index 0000000..cafd917
--- /dev/null
@@ -0,0 +1,5 @@
+print "2help imported"
+
+def help(a):
+    print "2 help %s" % 1
+    pass
diff --git a/functional_tests/support/ipt/test2/ipthelp.pyc b/functional_tests/support/ipt/test2/ipthelp.pyc
new file mode 100644 (file)
index 0000000..b1e7ae4
Binary files /dev/null and b/functional_tests/support/ipt/test2/ipthelp.pyc differ
diff --git a/functional_tests/support/ipt/test2/tests$py.class b/functional_tests/support/ipt/test2/tests$py.class
new file mode 100644 (file)
index 0000000..f5fd269
Binary files /dev/null and b/functional_tests/support/ipt/test2/tests$py.class differ
diff --git a/functional_tests/support/ipt/test2/tests.py b/functional_tests/support/ipt/test2/tests.py
new file mode 100644 (file)
index 0000000..1c95896
--- /dev/null
@@ -0,0 +1,8 @@
+import sys
+
+print 'ipthelp', sys.modules.get('ipthelp')
+import ipthelp
+print ipthelp
+
+def test2():
+    ipthelp.help(1)
diff --git a/functional_tests/support/ipt/test2/tests.pyc b/functional_tests/support/ipt/test2/tests.pyc
new file mode 100644 (file)
index 0000000..7c38ba7
Binary files /dev/null and b/functional_tests/support/ipt/test2/tests.pyc differ
diff --git a/functional_tests/support/issue038/test$py.class b/functional_tests/support/issue038/test$py.class
new file mode 100644 (file)
index 0000000..89ae0ca
Binary files /dev/null and b/functional_tests/support/issue038/test$py.class differ
diff --git a/functional_tests/support/issue038/test.py b/functional_tests/support/issue038/test.py
new file mode 100644 (file)
index 0000000..c55e5a9
--- /dev/null
@@ -0,0 +1,9 @@
+from nose.exc import SkipTest
+
+
+def test_a():
+    pass
+
+
+def test_b():
+    raise SkipTest("I'm not ready for test b")
diff --git a/functional_tests/support/issue038/test.pyc b/functional_tests/support/issue038/test.pyc
new file mode 100644 (file)
index 0000000..1982d2d
Binary files /dev/null and b/functional_tests/support/issue038/test.pyc differ
diff --git a/functional_tests/support/issue072/test$py.class b/functional_tests/support/issue072/test$py.class
new file mode 100644 (file)
index 0000000..70e2ac2
Binary files /dev/null and b/functional_tests/support/issue072/test$py.class differ
diff --git a/functional_tests/support/issue072/test.py b/functional_tests/support/issue072/test.py
new file mode 100644 (file)
index 0000000..2aab0bd
--- /dev/null
@@ -0,0 +1,4 @@
+def test():
+    print "something"
+    a = 4
+    assert a == 2
diff --git a/functional_tests/support/issue072/test.pyc b/functional_tests/support/issue072/test.pyc
new file mode 100644 (file)
index 0000000..ab45055
Binary files /dev/null and b/functional_tests/support/issue072/test.pyc differ
diff --git a/functional_tests/support/issue082/_mypackage/__init__.py b/functional_tests/support/issue082/_mypackage/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/issue082/_mypackage/_eggs.py b/functional_tests/support/issue082/_mypackage/_eggs.py
new file mode 100644 (file)
index 0000000..81c65f7
--- /dev/null
@@ -0,0 +1,8 @@
+"""
+>>> True
+False
+"""
+"""
+>>> True
+False
+"""
diff --git a/functional_tests/support/issue082/_mypackage/bacon.py b/functional_tests/support/issue082/_mypackage/bacon.py
new file mode 100644 (file)
index 0000000..81c65f7
--- /dev/null
@@ -0,0 +1,8 @@
+"""
+>>> True
+False
+"""
+"""
+>>> True
+False
+"""
diff --git a/functional_tests/support/issue082/mypublicpackage/__init__$py.class b/functional_tests/support/issue082/mypublicpackage/__init__$py.class
new file mode 100644 (file)
index 0000000..6188ba9
Binary files /dev/null and b/functional_tests/support/issue082/mypublicpackage/__init__$py.class differ
diff --git a/functional_tests/support/issue082/mypublicpackage/__init__.py b/functional_tests/support/issue082/mypublicpackage/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/issue082/mypublicpackage/__init__.pyc b/functional_tests/support/issue082/mypublicpackage/__init__.pyc
new file mode 100644 (file)
index 0000000..212022a
Binary files /dev/null and b/functional_tests/support/issue082/mypublicpackage/__init__.pyc differ
diff --git a/functional_tests/support/issue082/mypublicpackage/_foo$py.class b/functional_tests/support/issue082/mypublicpackage/_foo$py.class
new file mode 100644 (file)
index 0000000..314b0ec
Binary files /dev/null and b/functional_tests/support/issue082/mypublicpackage/_foo$py.class differ
diff --git a/functional_tests/support/issue082/mypublicpackage/_foo.py b/functional_tests/support/issue082/mypublicpackage/_foo.py
new file mode 100644 (file)
index 0000000..81c65f7
--- /dev/null
@@ -0,0 +1,8 @@
+"""
+>>> True
+False
+"""
+"""
+>>> True
+False
+"""
diff --git a/functional_tests/support/issue082/mypublicpackage/_foo.pyc b/functional_tests/support/issue082/mypublicpackage/_foo.pyc
new file mode 100644 (file)
index 0000000..c75cbd9
Binary files /dev/null and b/functional_tests/support/issue082/mypublicpackage/_foo.pyc differ
diff --git a/functional_tests/support/issue082/mypublicpackage/bar$py.class b/functional_tests/support/issue082/mypublicpackage/bar$py.class
new file mode 100644 (file)
index 0000000..b2f16f9
Binary files /dev/null and b/functional_tests/support/issue082/mypublicpackage/bar$py.class differ
diff --git a/functional_tests/support/issue082/mypublicpackage/bar.py b/functional_tests/support/issue082/mypublicpackage/bar.py
new file mode 100644 (file)
index 0000000..81c65f7
--- /dev/null
@@ -0,0 +1,8 @@
+"""
+>>> True
+False
+"""
+"""
+>>> True
+False
+"""
diff --git a/functional_tests/support/issue082/mypublicpackage/bar.pyc b/functional_tests/support/issue082/mypublicpackage/bar.pyc
new file mode 100644 (file)
index 0000000..e2f7344
Binary files /dev/null and b/functional_tests/support/issue082/mypublicpackage/bar.pyc differ
diff --git a/functional_tests/support/issue130/test$py.class b/functional_tests/support/issue130/test$py.class
new file mode 100644 (file)
index 0000000..37be498
Binary files /dev/null and b/functional_tests/support/issue130/test$py.class differ
diff --git a/functional_tests/support/issue130/test.py b/functional_tests/support/issue130/test.py
new file mode 100644 (file)
index 0000000..9778eef
--- /dev/null
@@ -0,0 +1,5 @@
+def setup():
+   raise "KABOOM"
+
+def test_foo():
+    assert(1==1)
diff --git a/functional_tests/support/issue130/test.pyc b/functional_tests/support/issue130/test.pyc
new file mode 100644 (file)
index 0000000..c52735b
Binary files /dev/null and b/functional_tests/support/issue130/test.pyc differ
diff --git a/functional_tests/support/issue143/not-a-package/__init__.py b/functional_tests/support/issue143/not-a-package/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/functional_tests/support/issue143/not-a-package/test.py b/functional_tests/support/issue143/not-a-package/test.py
new file mode 100644 (file)
index 0000000..c1fb1c2
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    raise Exception("do not run")
diff --git a/functional_tests/support/issue191/UNKNOWN.egg-info/PKG-INFO b/functional_tests/support/issue191/UNKNOWN.egg-info/PKG-INFO
new file mode 100644 (file)
index 0000000..11b3dcd
--- /dev/null
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: UNKNOWN
+Version: 0.0.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/functional_tests/support/issue191/UNKNOWN.egg-info/SOURCES.txt b/functional_tests/support/issue191/UNKNOWN.egg-info/SOURCES.txt
new file mode 100644 (file)
index 0000000..75d8cfe
--- /dev/null
@@ -0,0 +1,6 @@
+setup.cfg
+setup.py
+UNKNOWN.egg-info/PKG-INFO
+UNKNOWN.egg-info/SOURCES.txt
+UNKNOWN.egg-info/dependency_links.txt
+UNKNOWN.egg-info/top_level.txt
\ No newline at end of file
diff --git a/functional_tests/support/issue191/UNKNOWN.egg-info/dependency_links.txt b/functional_tests/support/issue191/UNKNOWN.egg-info/dependency_links.txt
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/functional_tests/support/issue191/UNKNOWN.egg-info/top_level.txt b/functional_tests/support/issue191/UNKNOWN.egg-info/top_level.txt
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/functional_tests/support/issue191/setup.cfg b/functional_tests/support/issue191/setup.cfg
new file mode 100644 (file)
index 0000000..431cf7a
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+verbosity=2
\ No newline at end of file
diff --git a/functional_tests/support/issue191/setup.py b/functional_tests/support/issue191/setup.py
new file mode 100644 (file)
index 0000000..86276be
--- /dev/null
@@ -0,0 +1,3 @@
+from setuptools import setup
+
+setup(name='issue191')
diff --git a/functional_tests/support/issue191/test$py.class b/functional_tests/support/issue191/test$py.class
new file mode 100644 (file)
index 0000000..b68f450
Binary files /dev/null and b/functional_tests/support/issue191/test$py.class differ
diff --git a/functional_tests/support/issue191/test.py b/functional_tests/support/issue191/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/functional_tests/support/issue191/test.pyc b/functional_tests/support/issue191/test.pyc
new file mode 100644 (file)
index 0000000..d774193
Binary files /dev/null and b/functional_tests/support/issue191/test.pyc differ
diff --git a/functional_tests/support/issue269/test_bad_class$py.class b/functional_tests/support/issue269/test_bad_class$py.class
new file mode 100644 (file)
index 0000000..a351f0a
Binary files /dev/null and b/functional_tests/support/issue269/test_bad_class$py.class differ
diff --git a/functional_tests/support/issue269/test_bad_class.py b/functional_tests/support/issue269/test_bad_class.py
new file mode 100644 (file)
index 0000000..b5642a6
--- /dev/null
@@ -0,0 +1,5 @@
+class TestCrashy(object):
+    def __init__(self):
+        raise Exception("pow")
+    def test_whatever(self):
+        pass
diff --git a/functional_tests/support/issue269/test_bad_class.pyc b/functional_tests/support/issue269/test_bad_class.pyc
new file mode 100644 (file)
index 0000000..ea243cd
Binary files /dev/null and b/functional_tests/support/issue269/test_bad_class.pyc differ
diff --git a/functional_tests/support/issue279/test_mod_setup_fails$py.class b/functional_tests/support/issue279/test_mod_setup_fails$py.class
new file mode 100644 (file)
index 0000000..4d8be3d
Binary files /dev/null and b/functional_tests/support/issue279/test_mod_setup_fails$py.class differ
diff --git a/functional_tests/support/issue279/test_mod_setup_fails.py b/functional_tests/support/issue279/test_mod_setup_fails.py
new file mode 100644 (file)
index 0000000..80d22d7
--- /dev/null
@@ -0,0 +1,5 @@
+def setup():
+    raise Exception("I would prefer not to")
+
+def test():
+    raise Exception("I should never run")
diff --git a/functional_tests/support/issue279/test_mod_setup_fails.pyc b/functional_tests/support/issue279/test_mod_setup_fails.pyc
new file mode 100644 (file)
index 0000000..7a6f673
Binary files /dev/null and b/functional_tests/support/issue279/test_mod_setup_fails.pyc differ
diff --git a/functional_tests/support/issue408/nosetests.xml b/functional_tests/support/issue408/nosetests.xml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/functional_tests/support/issue408/test$py.class b/functional_tests/support/issue408/test$py.class
new file mode 100644 (file)
index 0000000..b14cdde
Binary files /dev/null and b/functional_tests/support/issue408/test$py.class differ
diff --git a/functional_tests/support/issue408/test.py b/functional_tests/support/issue408/test.py
new file mode 100644 (file)
index 0000000..a5e88e9
--- /dev/null
@@ -0,0 +1,16 @@
+class base:
+    @classmethod
+    def setup_class(cls):
+        cls.inited = 1
+    @classmethod
+    def teardown_class(cls):
+        cls.inited = 0
+    def test1(self):
+        assert self.inited
+    def test2(self):
+        assert self.inited
+
+class testa(base):
+    pass
+class testb(base):
+    pass
diff --git a/functional_tests/support/issue408/test.pyc b/functional_tests/support/issue408/test.pyc
new file mode 100644 (file)
index 0000000..2231811
Binary files /dev/null and b/functional_tests/support/issue408/test.pyc differ
diff --git a/functional_tests/support/ltfn/state$py.class b/functional_tests/support/ltfn/state$py.class
new file mode 100644 (file)
index 0000000..ddda8d6
Binary files /dev/null and b/functional_tests/support/ltfn/state$py.class differ
diff --git a/functional_tests/support/ltfn/state.py b/functional_tests/support/ltfn/state.py
new file mode 100644 (file)
index 0000000..f931b21
--- /dev/null
@@ -0,0 +1 @@
+called = []
diff --git a/functional_tests/support/ltfn/state.pyc b/functional_tests/support/ltfn/state.pyc
new file mode 100644 (file)
index 0000000..3f9bb44
Binary files /dev/null and b/functional_tests/support/ltfn/state.pyc differ
diff --git a/functional_tests/support/ltfn/test_mod$py.class b/functional_tests/support/ltfn/test_mod$py.class
new file mode 100644 (file)
index 0000000..7e833bf
Binary files /dev/null and b/functional_tests/support/ltfn/test_mod$py.class differ
diff --git a/functional_tests/support/ltfn/test_mod.py b/functional_tests/support/ltfn/test_mod.py
new file mode 100644 (file)
index 0000000..b98d207
--- /dev/null
@@ -0,0 +1,10 @@
+from state import called
+
+def setup():
+    called.append('test_mod.setup')
+
+def test_mod():
+    called.append('test_mod.test_mod')
+
+def teardown():
+    called.append('test_mod.teardown')
diff --git a/functional_tests/support/ltfn/test_mod.pyc b/functional_tests/support/ltfn/test_mod.pyc
new file mode 100644 (file)
index 0000000..a04ddfe
Binary files /dev/null and b/functional_tests/support/ltfn/test_mod.pyc differ
diff --git a/functional_tests/support/ltfn/test_pak1/__init__$py.class b/functional_tests/support/ltfn/test_pak1/__init__$py.class
new file mode 100644 (file)
index 0000000..8f338be
Binary files /dev/null and b/functional_tests/support/ltfn/test_pak1/__init__$py.class differ
diff --git a/functional_tests/support/ltfn/test_pak1/__init__.py b/functional_tests/support/ltfn/test_pak1/__init__.py
new file mode 100644 (file)
index 0000000..7087716
--- /dev/null
@@ -0,0 +1,13 @@
+from state import called
+
+def setup():
+    called.append('test_pak1.setup')
+
+def teardown():
+    called.append('test_pak1.teardown')
+
+def test_one_one():
+    called.append('test_pak1.test_one_one')
+
+def test_one_two():
+    called.append('test_pak1.test_one_two')
diff --git a/functional_tests/support/ltfn/test_pak1/__init__.pyc b/functional_tests/support/ltfn/test_pak1/__init__.pyc
new file mode 100644 (file)
index 0000000..bd6403a
Binary files /dev/null and b/functional_tests/support/ltfn/test_pak1/__init__.pyc differ
diff --git a/functional_tests/support/ltfn/test_pak1/test_mod$py.class b/functional_tests/support/ltfn/test_pak1/test_mod$py.class
new file mode 100644 (file)
index 0000000..a71fc4f
Binary files /dev/null and b/functional_tests/support/ltfn/test_pak1/test_mod$py.class differ
diff --git a/functional_tests/support/ltfn/test_pak1/test_mod.py b/functional_tests/support/ltfn/test_pak1/test_mod.py
new file mode 100644 (file)
index 0000000..dd565da
--- /dev/null
@@ -0,0 +1,12 @@
+from state import called
+
+def setup():
+    called.append('test_pak1.test_mod.setup')
+
+def teardown():
+    called.append('test_pak1.test_mod.teardown')
+
+def test_one_mod_one():
+    called.append('test_pak1.test_mod.test_one_mod_one')
+    pass
+
diff --git a/functional_tests/support/ltfn/test_pak1/test_mod.pyc b/functional_tests/support/ltfn/test_pak1/test_mod.pyc
new file mode 100644 (file)
index 0000000..caa9b12
Binary files /dev/null and b/functional_tests/support/ltfn/test_pak1/test_mod.pyc differ
diff --git a/functional_tests/support/ltfn/test_pak2/__init__$py.class b/functional_tests/support/ltfn/test_pak2/__init__$py.class
new file mode 100644 (file)
index 0000000..2e14e5c
Binary files /dev/null and b/functional_tests/support/ltfn/test_pak2/__init__$py.class differ
diff --git a/functional_tests/support/ltfn/test_pak2/__init__.py b/functional_tests/support/ltfn/test_pak2/__init__.py
new file mode 100644 (file)
index 0000000..88a2ae8
--- /dev/null
@@ -0,0 +1,13 @@
+from state import called
+
+def setup():
+    called.append('test_pak2.setup')
+
+def teardown():
+    called.append('test_pak2.teardown')
+    
+def test_two_one():
+    called.append('test_pak2.test_two_one')
+
+def test_two_two():
+    called.append('test_pak2.test_two_two')
diff --git a/functional_tests/support/ltfn/test_pak2/__init__.pyc b/functional_tests/support/ltfn/test_pak2/__init__.pyc
new file mode 100644 (file)
index 0000000..7f25b6d
Binary files /dev/null and b/functional_tests/support/ltfn/test_pak2/__init__.pyc differ
diff --git a/functional_tests/support/ltftc/tests$py.class b/functional_tests/support/ltftc/tests$py.class
new file mode 100644 (file)
index 0000000..95254b2
Binary files /dev/null and b/functional_tests/support/ltftc/tests$py.class differ
diff --git a/functional_tests/support/ltftc/tests.py b/functional_tests/support/ltftc/tests.py
new file mode 100644 (file)
index 0000000..02208a1
--- /dev/null
@@ -0,0 +1,9 @@
+import unittest
+
+class Tests(unittest.TestCase):
+
+    def setUp(self):
+        self.value = 1
+
+    def test_value(self):
+        self.assertEqual(self.value, 1)
diff --git a/functional_tests/support/ltftc/tests.pyc b/functional_tests/support/ltftc/tests.pyc
new file mode 100644 (file)
index 0000000..fed34d5
Binary files /dev/null and b/functional_tests/support/ltftc/tests.pyc differ
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/__init__$py.class b/functional_tests/support/namespace_pkg/namespace_pkg/__init__$py.class
new file mode 100644 (file)
index 0000000..c570315
Binary files /dev/null and b/functional_tests/support/namespace_pkg/namespace_pkg/__init__$py.class differ
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/__init__.py b/functional_tests/support/namespace_pkg/namespace_pkg/__init__.py
new file mode 100644 (file)
index 0000000..86fe67d
--- /dev/null
@@ -0,0 +1,2 @@
+import pkgutil
+__path__ = pkgutil.extend_path(__path__, __name__) 
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/__init__.pyc b/functional_tests/support/namespace_pkg/namespace_pkg/__init__.pyc
new file mode 100644 (file)
index 0000000..36eb9ce
Binary files /dev/null and b/functional_tests/support/namespace_pkg/namespace_pkg/__init__.pyc differ
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/example$py.class b/functional_tests/support/namespace_pkg/namespace_pkg/example$py.class
new file mode 100644 (file)
index 0000000..9df8e99
Binary files /dev/null and b/functional_tests/support/namespace_pkg/namespace_pkg/example$py.class differ
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/example.py b/functional_tests/support/namespace_pkg/namespace_pkg/example.py
new file mode 100644 (file)
index 0000000..86dda80
--- /dev/null
@@ -0,0 +1 @@
+test = 'the nose knows'
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/example.pyc b/functional_tests/support/namespace_pkg/namespace_pkg/example.pyc
new file mode 100644 (file)
index 0000000..de3bdff
Binary files /dev/null and b/functional_tests/support/namespace_pkg/namespace_pkg/example.pyc differ
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg$py.class b/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg$py.class
new file mode 100644 (file)
index 0000000..7faeae2
Binary files /dev/null and b/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg$py.class differ
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.py b/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.py
new file mode 100644 (file)
index 0000000..9315c52
--- /dev/null
@@ -0,0 +1,6 @@
+from namespace_pkg import example
+from namespace_pkg import example2
+
+def test_namespace_pkg():
+    assert example.test == 'the nose knows'
+    assert example2.test == 'put that snoot to use'
diff --git a/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.pyc b/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.pyc
new file mode 100644 (file)
index 0000000..abcaebc
Binary files /dev/null and b/functional_tests/support/namespace_pkg/namespace_pkg/test_pkg.pyc differ
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/__init__.py b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/__init__.py
new file mode 100644 (file)
index 0000000..86fe67d
--- /dev/null
@@ -0,0 +1,2 @@
+import pkgutil
+__path__ = pkgutil.extend_path(__path__, __name__) 
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2$py.class b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2$py.class
new file mode 100644 (file)
index 0000000..2a9326b
Binary files /dev/null and b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2$py.class differ
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.py b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.py
new file mode 100644 (file)
index 0000000..47f20d0
--- /dev/null
@@ -0,0 +1 @@
+test = 'put that snoot to use'
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.pyc b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.pyc
new file mode 100644 (file)
index 0000000..59959b4
Binary files /dev/null and b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/example2.pyc differ
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2$py.class b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2$py.class
new file mode 100644 (file)
index 0000000..cea159e
Binary files /dev/null and b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2$py.class differ
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.py b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.py
new file mode 100644 (file)
index 0000000..44ed6c1
--- /dev/null
@@ -0,0 +1,6 @@
+from namespace_pkg import example
+from namespace_pkg import example2
+
+def test_namespace_pkg2():
+    assert example.test == 'the nose knows'
+    assert example2.test == 'put that snoot to use'
diff --git a/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.pyc b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.pyc
new file mode 100644 (file)
index 0000000..c2d933a
Binary files /dev/null and b/functional_tests/support/namespace_pkg/site-packages/namespace_pkg/test_pkg2.pyc differ
diff --git a/functional_tests/support/package1/example$py.class b/functional_tests/support/package1/example$py.class
new file mode 100644 (file)
index 0000000..8483b44
Binary files /dev/null and b/functional_tests/support/package1/example$py.class differ
diff --git a/functional_tests/support/package1/example.py b/functional_tests/support/package1/example.py
new file mode 100644 (file)
index 0000000..d758778
--- /dev/null
@@ -0,0 +1,8 @@
+def times_two(a):
+    """
+    >>> times_two(2)
+    4
+    >>> times_two('bee')
+    beebee
+    """
+    return a * 2
diff --git a/functional_tests/support/package1/example.pyc b/functional_tests/support/package1/example.pyc
new file mode 100644 (file)
index 0000000..9fefff2
Binary files /dev/null and b/functional_tests/support/package1/example.pyc differ
diff --git a/functional_tests/support/package1/tests/test_example_function$py.class b/functional_tests/support/package1/tests/test_example_function$py.class
new file mode 100644 (file)
index 0000000..c63b8ff
Binary files /dev/null and b/functional_tests/support/package1/tests/test_example_function$py.class differ
diff --git a/functional_tests/support/package1/tests/test_example_function.py b/functional_tests/support/package1/tests/test_example_function.py
new file mode 100644 (file)
index 0000000..d74f2a5
--- /dev/null
@@ -0,0 +1,15 @@
+import example
+import unittest
+
+class TestExampleFunction_TestCase(unittest.TestCase):
+    def test_times_two(self):
+        self.assertEqual(example.times_two(2), 4)
+
+
+class TestExampleFunction:
+    def test_times_two(self):
+        assert example.times_two(2) == 4
+
+
+def test_times_two():
+    assert example.times_two(2) == 4
diff --git a/functional_tests/support/package1/tests/test_example_function.pyc b/functional_tests/support/package1/tests/test_example_function.pyc
new file mode 100644 (file)
index 0000000..674c778
Binary files /dev/null and b/functional_tests/support/package1/tests/test_example_function.pyc differ
diff --git a/functional_tests/support/package2/maths$py.class b/functional_tests/support/package2/maths$py.class
new file mode 100644 (file)
index 0000000..7eae3a5
Binary files /dev/null and b/functional_tests/support/package2/maths$py.class differ
diff --git a/functional_tests/support/package2/maths.py b/functional_tests/support/package2/maths.py
new file mode 100644 (file)
index 0000000..abd85e5
--- /dev/null
@@ -0,0 +1,11 @@
+def add(a, b):
+    return a + b
+
+def div(a, b):
+    return a // b
+
+def minus(a, b):
+    return a - b
+
+def mult(a, b):
+    return a * b
diff --git a/functional_tests/support/package2/maths.pyc b/functional_tests/support/package2/maths.pyc
new file mode 100644 (file)
index 0000000..5a18b5a
Binary files /dev/null and b/functional_tests/support/package2/maths.pyc differ
diff --git a/functional_tests/support/package2/test_pak/__init__$py.class b/functional_tests/support/package2/test_pak/__init__$py.class
new file mode 100644 (file)
index 0000000..9f7b4a7
Binary files /dev/null and b/functional_tests/support/package2/test_pak/__init__$py.class differ
diff --git a/functional_tests/support/package2/test_pak/__init__.py b/functional_tests/support/package2/test_pak/__init__.py
new file mode 100644 (file)
index 0000000..3a05d5f
--- /dev/null
@@ -0,0 +1,11 @@
+print "*** test_pak imported"
+state = []
+
+def setup():
+    # print "SETUP CALLED", state, id(state)
+    state.append('test_pak.setup')
+
+
+def teardown():
+    # print "TEARDOWN CALLED", state, id(state)
+    state.append('test_pak.teardown')
diff --git a/functional_tests/support/package2/test_pak/__init__.pyc b/functional_tests/support/package2/test_pak/__init__.pyc
new file mode 100644 (file)
index 0000000..50777a8
Binary files /dev/null and b/functional_tests/support/package2/test_pak/__init__.pyc differ
diff --git a/functional_tests/support/package2/test_pak/test_mod$py.class b/functional_tests/support/package2/test_pak/test_mod$py.class
new file mode 100644 (file)
index 0000000..e967fe1
Binary files /dev/null and b/functional_tests/support/package2/test_pak/test_mod$py.class differ
diff --git a/functional_tests/support/package2/test_pak/test_mod.py b/functional_tests/support/package2/test_pak/test_mod.py
new file mode 100644 (file)
index 0000000..09dc9e5
--- /dev/null
@@ -0,0 +1,20 @@
+print "test_mod imported!"
+
+import maths
+from test_pak import state
+
+def setup():
+    print "MOD setup called", state, id(state)
+    state.append('test_pak.test_mod.setup')
+
+def test_add():
+    print "MOD.test_add called", state, id(state)
+    state.append('test_pak.test_mod.test_add')
+    assert maths.add(1, 2) == 3
+
+def test_minus():
+    state.append('test_pak.test_mod.test_minus')
+    
+def teardown():
+    print "MOD teardown called", state, id(state)
+    state.append('test_pak.test_mod.teardown')
diff --git a/functional_tests/support/package2/test_pak/test_mod.pyc b/functional_tests/support/package2/test_pak/test_mod.pyc
new file mode 100644 (file)
index 0000000..0deba42
Binary files /dev/null and b/functional_tests/support/package2/test_pak/test_mod.pyc differ
diff --git a/functional_tests/support/package2/test_pak/test_sub/__init__$py.class b/functional_tests/support/package2/test_pak/test_sub/__init__$py.class
new file mode 100644 (file)
index 0000000..d776b66
Binary files /dev/null and b/functional_tests/support/package2/test_pak/test_sub/__init__$py.class differ
diff --git a/functional_tests/support/package2/test_pak/test_sub/__init__.py b/functional_tests/support/package2/test_pak/test_sub/__init__.py
new file mode 100644 (file)
index 0000000..a3d051a
--- /dev/null
@@ -0,0 +1,9 @@
+from test_pak import state
+
+def setup():
+    # print "SUB setup called", state, id(state)
+    state.append('test_pak.test_sub.setup')
+
+def teardown():
+    # print "SUB teardown called", state, id(state)
+    state.append('test_pak.test_sub.teardown')
diff --git a/functional_tests/support/package2/test_pak/test_sub/__init__.pyc b/functional_tests/support/package2/test_pak/test_sub/__init__.pyc
new file mode 100644 (file)
index 0000000..003c49a
Binary files /dev/null and b/functional_tests/support/package2/test_pak/test_sub/__init__.pyc differ
diff --git a/functional_tests/support/package2/test_pak/test_sub/test_mod$py.class b/functional_tests/support/package2/test_pak/test_sub/test_mod$py.class
new file mode 100644 (file)
index 0000000..02f576e
Binary files /dev/null and b/functional_tests/support/package2/test_pak/test_sub/test_mod$py.class differ
diff --git a/functional_tests/support/package2/test_pak/test_sub/test_mod.py b/functional_tests/support/package2/test_pak/test_sub/test_mod.py
new file mode 100644 (file)
index 0000000..2c77c97
--- /dev/null
@@ -0,0 +1,36 @@
+from test_pak import state
+import maths
+
+def setup():
+    state.append('test_pak.test_sub.test_mod.setup')
+
+def test():
+    state.append('test_pak.test_sub.test_mod.test')
+    assert maths.add(1, 2) == 3
+
+class TestMaths:
+
+    def setup_class(cls):
+        state.append('test_pak.test_sub.test_mod.TestMaths.setup_class')
+    setup_class = classmethod(setup_class)
+
+    def teardown_class(cls):
+        state.append('test_pak.test_sub.test_mod.TestMaths.teardown_class')
+    teardown_class = classmethod(teardown_class)
+    
+    def setup(self):
+        state.append('test_pak.test_sub.test_mod.TestMaths.setup')
+
+    def teardown(self):
+        state.append('test_pak.test_sub.test_mod.TestMaths.teardown')
+        
+    def test_div(self):
+        state.append('test_pak.test_sub.test_mod.TestMaths.test_div')
+        assert maths.div(2, 1) == 2, "%s != %s" % (maths.div(2, 1), 2)
+
+    def test_two_two(self):
+        state.append('test_pak.test_sub.test_mod.TestMaths.test_two_two')
+        assert maths.mult(2, 2) == maths.add(2, 2)
+    
+def teardown():
+    state.append('test_pak.test_sub.test_mod.teardown')    
diff --git a/functional_tests/support/package2/test_pak/test_sub/test_mod.pyc b/functional_tests/support/package2/test_pak/test_sub/test_mod.pyc
new file mode 100644 (file)
index 0000000..0b5ac5c
Binary files /dev/null and b/functional_tests/support/package2/test_pak/test_sub/test_mod.pyc differ
diff --git a/functional_tests/support/package3/lib/a$py.class b/functional_tests/support/package3/lib/a$py.class
new file mode 100644 (file)
index 0000000..5628cbc
Binary files /dev/null and b/functional_tests/support/package3/lib/a$py.class differ
diff --git a/functional_tests/support/package3/lib/a.py b/functional_tests/support/package3/lib/a.py
new file mode 100644 (file)
index 0000000..b2dde25
--- /dev/null
@@ -0,0 +1,2 @@
+def a():
+    pass
diff --git a/functional_tests/support/package3/lib/a.pyc b/functional_tests/support/package3/lib/a.pyc
new file mode 100644 (file)
index 0000000..74202a2
Binary files /dev/null and b/functional_tests/support/package3/lib/a.pyc differ
diff --git a/functional_tests/support/package3/src/b$py.class b/functional_tests/support/package3/src/b$py.class
new file mode 100644 (file)
index 0000000..559cdec
Binary files /dev/null and b/functional_tests/support/package3/src/b$py.class differ
diff --git a/functional_tests/support/package3/src/b.py b/functional_tests/support/package3/src/b.py
new file mode 100644 (file)
index 0000000..40aaa8c
--- /dev/null
@@ -0,0 +1,2 @@
+def b():
+    pass
diff --git a/functional_tests/support/package3/src/b.pyc b/functional_tests/support/package3/src/b.pyc
new file mode 100644 (file)
index 0000000..a319635
Binary files /dev/null and b/functional_tests/support/package3/src/b.pyc differ
diff --git a/functional_tests/support/package3/tests/test_a$py.class b/functional_tests/support/package3/tests/test_a$py.class
new file mode 100644 (file)
index 0000000..6b2750d
Binary files /dev/null and b/functional_tests/support/package3/tests/test_a$py.class differ
diff --git a/functional_tests/support/package3/tests/test_a.py b/functional_tests/support/package3/tests/test_a.py
new file mode 100644 (file)
index 0000000..3b978fc
--- /dev/null
@@ -0,0 +1,4 @@
+import a
+
+def test_a():
+    a.a()
diff --git a/functional_tests/support/package3/tests/test_a.pyc b/functional_tests/support/package3/tests/test_a.pyc
new file mode 100644 (file)
index 0000000..3731991
Binary files /dev/null and b/functional_tests/support/package3/tests/test_a.pyc differ
diff --git a/functional_tests/support/package3/tests/test_b$py.class b/functional_tests/support/package3/tests/test_b$py.class
new file mode 100644 (file)
index 0000000..9b81e4d
Binary files /dev/null and b/functional_tests/support/package3/tests/test_b$py.class differ
diff --git a/functional_tests/support/package3/tests/test_b.py b/functional_tests/support/package3/tests/test_b.py
new file mode 100644 (file)
index 0000000..d8c182a
--- /dev/null
@@ -0,0 +1,4 @@
+import b
+
+def test_b():
+    b.b()
diff --git a/functional_tests/support/package3/tests/test_b.pyc b/functional_tests/support/package3/tests/test_b.pyc
new file mode 100644 (file)
index 0000000..93148cf
Binary files /dev/null and b/functional_tests/support/package3/tests/test_b.pyc differ
diff --git a/functional_tests/support/pass/test$py.class b/functional_tests/support/pass/test$py.class
new file mode 100644 (file)
index 0000000..56cce80
Binary files /dev/null and b/functional_tests/support/pass/test$py.class differ
diff --git a/functional_tests/support/pass/test.py b/functional_tests/support/pass/test.py
new file mode 100644 (file)
index 0000000..f174823
--- /dev/null
@@ -0,0 +1,2 @@
+def test():
+    pass
diff --git a/functional_tests/support/pass/test.pyc b/functional_tests/support/pass/test.pyc
new file mode 100644 (file)
index 0000000..b4ab485
Binary files /dev/null and b/functional_tests/support/pass/test.pyc differ
diff --git a/functional_tests/support/test.cfg b/functional_tests/support/test.cfg
new file mode 100644 (file)
index 0000000..738c764
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+verbosity=10
diff --git a/functional_tests/support/test_buggy_generators$py.class b/functional_tests/support/test_buggy_generators$py.class
new file mode 100644 (file)
index 0000000..acdd55c
Binary files /dev/null and b/functional_tests/support/test_buggy_generators$py.class differ
diff --git a/functional_tests/support/test_buggy_generators.py b/functional_tests/support/test_buggy_generators.py
new file mode 100644 (file)
index 0000000..00e4815
--- /dev/null
@@ -0,0 +1,29 @@
+def test_generator_fails_before_yield():
+    a = 1 // 0
+    yield lambda: True
+
+
+def test_generator_fails_during_iteration():
+    for i in [1, 2, 3, 0, 5, 6]:
+        a = 1 // i
+        yield lambda: True
+
+
+def test_ok():
+    pass
+
+
+class TestBuggyGenerators(object):
+
+    def test_generator_fails_before_yield(self):
+        a = 1 // 0
+        yield lambda: True
+
+    def test_generator_fails_during_iteration(self):
+        for i in [1, 2, 3, 0, 5, 6]:
+            a = 1 // i
+            yield lambda: True
+
+    def test_ok(self):
+        pass
+
diff --git a/functional_tests/support/test_buggy_generators.pyc b/functional_tests/support/test_buggy_generators.pyc
new file mode 100644 (file)
index 0000000..93340de
Binary files /dev/null and b/functional_tests/support/test_buggy_generators.pyc differ
diff --git a/functional_tests/support/todo/test_with_todo$py.class b/functional_tests/support/todo/test_with_todo$py.class
new file mode 100644 (file)
index 0000000..9ca745e
Binary files /dev/null and b/functional_tests/support/todo/test_with_todo$py.class differ
diff --git a/functional_tests/support/todo/test_with_todo.py b/functional_tests/support/todo/test_with_todo.py
new file mode 100644 (file)
index 0000000..b48f6a8
--- /dev/null
@@ -0,0 +1,7 @@
+from todoplug import Todo
+
+def test_some_important_thing():
+    raise Todo("Not done yet")
+
+def test_something_else():
+    pass
diff --git a/functional_tests/support/todo/test_with_todo.pyc b/functional_tests/support/todo/test_with_todo.pyc
new file mode 100644 (file)
index 0000000..04ae346
Binary files /dev/null and b/functional_tests/support/todo/test_with_todo.pyc differ
diff --git a/functional_tests/support/todo/todoplug$py.class b/functional_tests/support/todo/todoplug$py.class
new file mode 100644 (file)
index 0000000..a13a34d
Binary files /dev/null and b/functional_tests/support/todo/todoplug$py.class differ
diff --git a/functional_tests/support/todo/todoplug.py b/functional_tests/support/todo/todoplug.py
new file mode 100644 (file)
index 0000000..585c26a
--- /dev/null
@@ -0,0 +1,7 @@
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+class Todo(Exception):
+    pass
+
+class TodoPlugin(ErrorClassPlugin):
+    todo = ErrorClass(Todo, label='TODO', isfailure=True)
diff --git a/functional_tests/support/todo/todoplug.pyc b/functional_tests/support/todo/todoplug.pyc
new file mode 100644 (file)
index 0000000..761015e
Binary files /dev/null and b/functional_tests/support/todo/todoplug.pyc differ
diff --git a/functional_tests/support/twist/test_twisted.py b/functional_tests/support/twist/test_twisted.py
new file mode 100644 (file)
index 0000000..d6c57c2
--- /dev/null
@@ -0,0 +1,15 @@
+from twisted.trial import unittest
+
+class TestTwisted(unittest.TestCase):
+
+    def test(self):
+        pass
+
+    def test_fail(self):
+        self.fail("I failed")
+
+    def test_error(self):
+        raise TypeError("oops, wrong type")
+
+    def test_skip(self):
+        raise unittest.SkipTest('skip me')
diff --git a/functional_tests/support/xunit.xml b/functional_tests/support/xunit.xml
new file mode 100644 (file)
index 0000000..ca2ccf4
--- /dev/null
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?><testsuite name="nosetests" tests="6" errors="2" failures="1" skip="1"><testcase classname="test_xunit_as_suite.TestForXunit" name="test_error" time="0.002"><error type="exceptions.TypeError" message="oops, wrong type"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/functional_tests/support/xunit/test_xunit_as_suite.py", line 15, in test_error
+    raise TypeError("oops, wrong type")
+TypeError: oops, wrong type
+]]></error></testcase><testcase classname="test_xunit_as_suite.TestForXunit" name="test_fail" time="0.002"><failure type="exceptions.AssertionError" message="'this' != 'that'"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/functional_tests/support/xunit/test_xunit_as_suite.py", line 12, in test_fail
+    self.assertEqual("this","that")
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 333, in failUnlessEqual
+    raise self.failureException, \
+AssertionError: 'this' != 'that'
+]]></failure></testcase><testcase classname="test_xunit_as_suite.TestForXunit" name="test_non_ascii_error" time="0.001"><error type="exceptions.Exception" message="日本"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/functional_tests/support/xunit/test_xunit_as_suite.py", line 18, in test_non_ascii_error
+    raise Exception(u"日本")
+Exception: <unprintable Exception object>
+]]></error></testcase><testcase classname="test_xunit_as_suite.TestForXunit" name="test_output" time="0.001" /><testcase classname="test_xunit_as_suite.TestForXunit" name="test_skip" time="0.000"><skipped type="nose.plugins.skip.SkipTest" message="skipit"><![CDATA[Traceback (most recent call last):
+  File "/usr/local/Cellar/jython/2.5.1/libexec/Lib/unittest.py", line 260, in run
+    testMethod()
+  File "/private/tmp/nose_release_1.1.2/functional_tests/support/xunit/test_xunit_as_suite.py", line 24, in test_skip
+    raise SkipTest("skipit")
+SkipTest: skipit
+]]></skipped></testcase><testcase classname="test_xunit_as_suite.TestForXunit" name="test_success" time="0.001" /></testsuite>
\ No newline at end of file
diff --git a/functional_tests/support/xunit/test_xunit_as_suite$py.class b/functional_tests/support/xunit/test_xunit_as_suite$py.class
new file mode 100644 (file)
index 0000000..58de0d4
Binary files /dev/null and b/functional_tests/support/xunit/test_xunit_as_suite$py.class differ
diff --git a/functional_tests/support/xunit/test_xunit_as_suite.py b/functional_tests/support/xunit/test_xunit_as_suite.py
new file mode 100644 (file)
index 0000000..ec256b5
--- /dev/null
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+import sys
+from nose.exc import SkipTest
+import unittest
+
+class TestForXunit(unittest.TestCase):
+
+    def test_success(self):
+        pass
+
+    def test_fail(self):
+        self.assertEqual("this","that")
+
+    def test_error(self):
+        raise TypeError("oops, wrong type")
+    
+    def test_non_ascii_error(self):
+        raise Exception(u"日本")
+    
+    def test_output(self):
+        sys.stdout.write("test-generated output\n")
+
+    def test_skip(self):
+        raise SkipTest("skipit")
diff --git a/functional_tests/support/xunit/test_xunit_as_suite.pyc b/functional_tests/support/xunit/test_xunit_as_suite.pyc
new file mode 100644 (file)
index 0000000..42a7f84
Binary files /dev/null and b/functional_tests/support/xunit/test_xunit_as_suite.pyc differ
diff --git a/functional_tests/test_attribute_plugin$py.class b/functional_tests/test_attribute_plugin$py.class
new file mode 100644 (file)
index 0000000..5b26686
Binary files /dev/null and b/functional_tests/test_attribute_plugin$py.class differ
diff --git a/functional_tests/test_attribute_plugin.py b/functional_tests/test_attribute_plugin.py
new file mode 100644 (file)
index 0000000..a093cd5
--- /dev/null
@@ -0,0 +1,181 @@
+import os
+import sys
+import unittest
+from nose.plugins.attrib import AttributeSelector
+from nose.plugins import PluginTester
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+compat_24 = sys.version_info >= (2, 4)
+
+class AttributePluginTester(PluginTester, unittest.TestCase):
+    plugins = [AttributeSelector()]
+    suitepath = os.path.join(support, 'att')
+    # Some cases need -a to activate and others need -A, so
+    # let's treat -v as the activate argument and let individual
+    # cases specify their -a arguments as part of args
+    activate = '-v'
+
+    def runTest(self):
+        print '*' * 70
+        print str(self.output)
+        print '*' * 70
+        self.verify()
+
+    def verify(self):
+        raise NotImplementedError()
+
+
+class TestSimpleAttribute(AttributePluginTester):
+    args = ["-a", "a"]
+
+    def verify(self):
+        assert 'test_attr.test_one ... ok' in self.output
+        assert 'test_attr.test_two ... ok' in self.output
+        assert 'TestClass.test_class_one ... ok' in self.output
+        assert 'TestClass.test_class_two ... ok' in self.output
+        assert 'TestClass.test_class_three ... ok' in self.output
+        assert 'test_three' not in self.output
+        assert 'test_case_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+        assert 'TestAttrClass.test_one ... ok' in self.output
+        assert 'TestAttrClass.test_two ... ok' in self.output
+        assert 'TestAttrClass.ends_with_test ... ok' in self.output
+
+
+class TestNotSimpleAttribute(AttributePluginTester):
+    args = ["-a", "!a"]
+
+    def verify(self):
+        assert 'test_attr.test_one ... ok' not in self.output
+        assert 'test_attr.test_two ... ok' not in self.output
+        assert 'TestClass.test_class_one ... ok' not in self.output
+        assert 'TestClass.test_class_two ... ok' not in self.output
+        assert 'TestClass.test_class_three ... ok' not in self.output
+        assert 'test_three' in self.output
+        assert 'test_case_two' in self.output
+        assert 'test_case_one' in self.output
+        assert 'test_case_three' in self.output
+
+
+class TestAttributeValue(AttributePluginTester):
+    args = ["-a", "b=2"]
+
+    def verify(self):
+        assert 'test_attr.test_one ... ok' not in self.output
+        assert 'test_attr.test_two ... ok' not in self.output
+        assert 'test_attr.test_three ... ok' not in self.output
+        assert 'TestClass.test_class_one ... ok' not in self.output
+        assert 'TestClass.test_class_two ... ok' in self.output
+        assert 'TestClass.test_class_three ... ok' not in self.output
+        assert 'test_case_two' in self.output
+        assert 'test_case_one' in self.output
+        assert 'test_case_three' in self.output
+
+
+class TestAttributeArray(AttributePluginTester):
+    args = ["-a", "d=2"]
+
+    def verify(self):
+        assert 'test_attr.test_one ... ok' in self.output
+        assert 'test_attr.test_two ... ok' in self.output
+        assert 'test_attr.test_three ... ok' not in self.output
+        assert 'TestClass.test_class_one ... ok' not in self.output
+        assert 'TestClass.test_class_two ... ok' not in self.output
+        assert 'TestClass.test_class_three ... ok' not in self.output
+        assert 'test_case_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+
+
+class TestAttributeArrayAnd(AttributePluginTester):
+    args = ["-a", "d=1,d=2"]
+
+    def verify(self):
+        assert 'test_attr.test_one ... ok' in self.output
+        assert 'test_attr.test_two ... ok' not in self.output
+        assert 'test_attr.test_three ... ok' not in self.output
+        assert 'TestClass.test_class_one ... ok' not in self.output
+        assert 'TestClass.test_class_two ... ok' not in self.output
+        assert 'TestClass.test_class_three ... ok' not in self.output
+        assert 'test_case_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+
+
+class TestAttributeArrayOr(AttributePluginTester):
+    args = ["-a", "d=1", "-a", "d=2"]
+
+    def verify(self):
+        assert 'test_attr.test_one ... ok' in self.output
+        assert 'test_attr.test_two ... ok' in self.output
+        assert 'test_attr.test_three ... ok' in self.output
+        assert 'TestClass.test_class_one ... ok' not in self.output
+        assert 'TestClass.test_class_two ... ok' not in self.output
+        assert 'TestClass.test_class_three ... ok' not in self.output
+        assert 'test_case_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+        
+
+class TestInheritance(AttributePluginTester):
+    # Issue #412
+    args = ["-a", "from_super"]
+
+    def verify(self):
+        assert 'TestSubclass.test_method ... ok' in self.output
+        assert 'TestAttrSubClass.test_sub_three ... ok' in self.output
+        assert 'TestAttrSubClass.test_one ... ok' in self.output
+        assert 'TestAttrSubClass.added_later_test ... ok' in self.output
+        assert 'test_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+
+
+class TestStatic(AttributePluginTester):
+    # Issue #411
+    args = ["-a", "with_static"]
+    suitepath = os.path.join(support, 'att', 'test_attr.py:Static')
+
+    def verify(self):
+        assert 'Static.test_with_static ... ok' in self.output
+        assert 'test_case_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+
+
+class TestClassAndMethodAttrs(AttributePluginTester):
+    # Issue #324
+    args = ["-a", "meth_attr=method,cls_attr=class"]
+
+    def verify(self):
+        assert '(test_attr.TestClassAndMethodAttrs) ... ok' in self.output
+        assert 'test_case_two' not in self.output
+        assert 'test_case_one' not in self.output
+        assert 'test_case_three' not in self.output
+
+
+if compat_24:
+    class TestAttributeEval(AttributePluginTester):
+        args = ["-A", "c>20"]
+
+        def verify(self):
+            assert 'test_attr.test_one ... ok' not in self.output
+            assert 'test_attr.test_two ... ok' not in self.output
+            assert 'test_attr.test_three ... ok' not in self.output
+            assert 'TestClass.test_class_one ... ok' not in self.output
+            assert 'TestClass.test_class_two ... ok' not in self.output
+            assert 'TestClass.test_class_three ... ok' not in self.output
+            assert 'test_case_two' in self.output
+            assert 'test_case_one' not in self.output
+            assert 'test_case_three' not in self.output
+
+
+# Avoid trying to run base class as tests
+del AttributePluginTester
+
+if __name__ == '__main__':
+    #import logging
+    #logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/functional_tests/test_attribute_plugin.pyc b/functional_tests/test_attribute_plugin.pyc
new file mode 100644 (file)
index 0000000..b25237b
Binary files /dev/null and b/functional_tests/test_attribute_plugin.pyc differ
diff --git a/functional_tests/test_buggy_generators$py.class b/functional_tests/test_buggy_generators$py.class
new file mode 100644 (file)
index 0000000..d9ee34a
Binary files /dev/null and b/functional_tests/test_buggy_generators$py.class differ
diff --git a/functional_tests/test_buggy_generators.py b/functional_tests/test_buggy_generators.py
new file mode 100644 (file)
index 0000000..9e6e168
--- /dev/null
@@ -0,0 +1,36 @@
+import os
+import unittest
+from cStringIO import StringIO
+from nose.core import TestProgram
+from nose.config import Config
+from nose.result import _TextTestResult
+
+here = os.path.dirname(__file__)
+support = os.path.join(here, 'support')
+
+
+class TestRunner(unittest.TextTestRunner):
+    def _makeResult(self):
+        self.result = _TextTestResult(
+            self.stream, self.descriptions, self.verbosity)
+        return self.result
+
+    
+class TestBuggyGenerators(unittest.TestCase):
+    def test_run_buggy_generators(self):
+        stream = StringIO()
+        runner = TestRunner(stream=stream)
+        prog = TestProgram(
+            argv=['nosetests',
+                  os.path.join(support, 'test_buggy_generators.py')],
+            testRunner=runner,
+            config=Config(),
+            exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 12,
+                         "Expected to run 12 tests, ran %s" % res.testsRun)
+        assert not res.wasSuccessful()
+        assert len(res.errors) == 4
+        assert not res.failures
+    
diff --git a/functional_tests/test_buggy_generators.pyc b/functional_tests/test_buggy_generators.pyc
new file mode 100644 (file)
index 0000000..f4e33d3
Binary files /dev/null and b/functional_tests/test_buggy_generators.pyc differ
diff --git a/functional_tests/test_cases$py.class b/functional_tests/test_cases$py.class
new file mode 100644 (file)
index 0000000..675bc99
Binary files /dev/null and b/functional_tests/test_cases$py.class differ
diff --git a/functional_tests/test_cases.py b/functional_tests/test_cases.py
new file mode 100644 (file)
index 0000000..99b9071
--- /dev/null
@@ -0,0 +1,38 @@
+import unittest
+from nose.config import Config
+from nose import case
+from nose.plugins import Plugin, PluginManager
+
+class TestTestCasePluginCalls(unittest.TestCase):
+
+    def test_describe_test_called(self):
+        class Descrip(Plugin):
+            counter = 0
+            enabled = True
+            def describeTest(self, test):
+                return "test #%s" % id(test)
+            def testName(self, test):
+                self.counter += 1
+                return "(%s) test" % self.counter
+
+        class TC(unittest.TestCase):
+            def test_one(self):
+                pass
+            def test_two(self):
+                pass
+
+        config = Config(plugins=PluginManager(plugins=[Descrip()]))
+
+        c1 = case.Test(TC('test_one'), config=config)
+        c2 = case.Test(TC('test_two'), config=config)
+
+        self.assertEqual(str(c1), '(1) test')
+        self.assertEqual(str(c2), '(2) test')
+        assert c1.shortDescription().startswith('test #'), \
+               "Unexpected shortDescription: %s" % c1.shortDescription()
+        assert c2.shortDescription().startswith('test #'), \
+               "Unexpected shortDescription: %s" % c2.shortDescription()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_cases.pyc b/functional_tests/test_cases.pyc
new file mode 100644 (file)
index 0000000..a98bd7e
Binary files /dev/null and b/functional_tests/test_cases.pyc differ
diff --git a/functional_tests/test_collector$py.class b/functional_tests/test_collector$py.class
new file mode 100644 (file)
index 0000000..bdc2c85
Binary files /dev/null and b/functional_tests/test_collector$py.class differ
diff --git a/functional_tests/test_collector.py b/functional_tests/test_collector.py
new file mode 100644 (file)
index 0000000..c3b9dca
--- /dev/null
@@ -0,0 +1,46 @@
+import os
+import sys
+import unittest
+import warnings
+from cStringIO import StringIO
+from nose.result import _TextTestResult
+here = os.path.dirname(__file__)
+support = os.path.join(here, 'support')
+
+
+class TestRunner(unittest.TextTestRunner):
+    def _makeResult(self):
+        self.result = _TextTestResult(
+            self.stream, self.descriptions, self.verbosity)
+        return self.result
+
+
+class TestNoseTestCollector(unittest.TestCase):
+
+    def test_skip_works_with_collector(self):
+        verbosity = 2
+        stream = StringIO()
+        runner = TestRunner(stream=stream, verbosity=verbosity)
+        pwd = os.getcwd()
+
+        # we don't need to see our own warnings
+        warnings.filterwarnings(action='ignore',
+                                category=RuntimeWarning,
+                                module='nose.plugins.manager')
+
+        try:
+            os.chdir(os.path.join(support, 'issue038'))
+            unittest.TestProgram(
+                None, None,
+                argv=['test_collector', '-v', 'nose.collector'],
+                testRunner=runner)
+        except SystemExit:
+            pass
+        os.chdir(pwd)
+        out = stream.getvalue()
+        assert runner.result.wasSuccessful()
+        assert 'SKIP' in out, "SKIP not found in %s" % out
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_collector.pyc b/functional_tests/test_collector.pyc
new file mode 100644 (file)
index 0000000..5057bf2
Binary files /dev/null and b/functional_tests/test_collector.pyc differ
diff --git a/functional_tests/test_commands$py.class b/functional_tests/test_commands$py.class
new file mode 100644 (file)
index 0000000..1307d3f
Binary files /dev/null and b/functional_tests/test_commands$py.class differ
diff --git a/functional_tests/test_commands.py b/functional_tests/test_commands.py
new file mode 100644 (file)
index 0000000..682af04
--- /dev/null
@@ -0,0 +1,47 @@
+import os
+import sys
+import unittest
+from nose.plugins.skip import SkipTest
+from nose import commands
+from StringIO import StringIO
+
+support = os.path.join(
+    os.path.dirname(__file__), 'support', 'issue191')
+
+
+class TestCommands(unittest.TestCase):
+    def setUp(self):
+        try:
+            import setuptools
+        except ImportError:
+            raise SkipTest("setuptools not available")
+        self.dir = os.getcwd()
+        self.stderr = sys.stderr
+        os.chdir(support)
+
+    def tearDown(self):
+        os.chdir(self.dir)
+        sys.stderr = self.stderr
+    
+    def test_setup_nosetests_command_works(self):
+        from setuptools.dist import Distribution
+        buf = StringIO()
+        sys.stderr = buf
+        cmd = commands.nosetests(
+            Distribution(attrs={'script_name': 'setup.py',
+                                'package_dir': {'issue191': support}}))
+        cmd.finalize_options()
+        ## FIXME why doesn't Config see the chdir above?
+        print cmd._nosetests__config.workingDir
+        cmd._nosetests__config.workingDir = support
+        cmd._nosetests__config.stream = buf
+        try:
+            cmd.run()
+        except SystemExit, e:
+            self.assertFalse(e.args[0], buf.getvalue())
+        else:
+            self.fail("cmd.run() did not exit")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_commands.pyc b/functional_tests/test_commands.pyc
new file mode 100644 (file)
index 0000000..fa3bd3e
Binary files /dev/null and b/functional_tests/test_commands.pyc differ
diff --git a/functional_tests/test_config_files$py.class b/functional_tests/test_config_files$py.class
new file mode 100644 (file)
index 0000000..52401b4
Binary files /dev/null and b/functional_tests/test_config_files$py.class differ
diff --git a/functional_tests/test_config_files.py b/functional_tests/test_config_files.py
new file mode 100644 (file)
index 0000000..5f18f3a
--- /dev/null
@@ -0,0 +1,41 @@
+import logging
+import os
+import unittest
+from nose.config import Config
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class TestConfigurationFromFile(unittest.TestCase):
+    def setUp(self):
+        self.cfg_file = os.path.join(support, 'test.cfg')
+        # install mock root logger so that these tests don't stomp on
+        # the real logging config of the test runner
+        class MockLogger(logging.Logger):
+            root = logging.RootLogger(logging.WARNING)
+            manager = logging.Manager(root)
+        
+        self.real_logger = logging.Logger
+        self.real_root = logging.root
+        logging.Logger = MockLogger
+        logging.root = MockLogger.root
+
+    def tearDown(self):
+        # reset real root logger
+        logging.Logger = self.real_logger
+        logging.root = self.real_root
+        
+    def test_load_config_file(self):
+        c = Config(files=self.cfg_file)
+        c.configure(['test_load_config_file'])
+        self.assertEqual(c.verbosity, 10)
+
+    def test_config_file_set_by_arg(self):
+        c = Config()
+        c.configure(['test_config_file_set_by_arg',
+                     '-c', self.cfg_file, '-v'])
+        # 10 from file, 1 more from cmd line
+        self.assertEqual(c.verbosity, 11)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_config_files.pyc b/functional_tests/test_config_files.pyc
new file mode 100644 (file)
index 0000000..935ff0d
Binary files /dev/null and b/functional_tests/test_config_files.pyc differ
diff --git a/functional_tests/test_doctest_plugin$py.class b/functional_tests/test_doctest_plugin$py.class
new file mode 100644 (file)
index 0000000..a1b2cee
Binary files /dev/null and b/functional_tests/test_doctest_plugin$py.class differ
diff --git a/functional_tests/test_doctest_plugin.py b/functional_tests/test_doctest_plugin.py
new file mode 100644 (file)
index 0000000..c91ecc5
--- /dev/null
@@ -0,0 +1,44 @@
+import os
+import unittest
+from nose.plugins.doctests import Doctest
+from nose.plugins import PluginTester
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class TestDoctestPlugin(PluginTester, unittest.TestCase):
+    activate = '--with-doctest'
+    args = ['-v']
+    plugins = [Doctest()]
+    suitepath = os.path.join(support, 'dtt')
+    
+    def runTest(self):
+        print str(self.output)
+        
+        assert 'Doctest: some_mod ... ok' in self.output
+        assert 'Doctest: some_mod.foo ... ok' in self.output
+        assert 'Ran 2 tests' in self.output
+        assert str(self.output).strip().endswith('OK')
+
+
+class TestDoctestFiles(PluginTester, unittest.TestCase):
+    activate = '--with-doctest'
+    args = ['-v', '--doctest-extension=.txt']
+    plugins = [Doctest()]
+    suitepath = os.path.join(support, 'dtt', 'docs')
+    
+    def runTest(self):
+        print str(self.output)
+
+        expect = [
+            'Doctest: doc.txt ... ok',
+            'Doctest: errdoc.txt ... FAIL'
+            ]
+        for line in self.output:
+            if not line.strip():
+                continue
+            if line.startswith('='):
+                break
+            self.assertEqual(line.strip(), expect.pop(0))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_doctest_plugin.pyc b/functional_tests/test_doctest_plugin.pyc
new file mode 100644 (file)
index 0000000..9a175e1
Binary files /dev/null and b/functional_tests/test_doctest_plugin.pyc differ
diff --git a/functional_tests/test_entrypoints$py.class b/functional_tests/test_entrypoints$py.class
new file mode 100644 (file)
index 0000000..cd18562
Binary files /dev/null and b/functional_tests/test_entrypoints$py.class differ
diff --git a/functional_tests/test_entrypoints.py b/functional_tests/test_entrypoints.py
new file mode 100644 (file)
index 0000000..a57f218
--- /dev/null
@@ -0,0 +1,17 @@
+import os
+import sys
+from nose.exc import SkipTest
+
+try:
+    from pkg_resources import EntryPoint
+except ImportError:
+    raise SkipTest("No setuptools available; skipping")
+
+here = os.path.dirname(__file__)
+support = os.path.join(here, 'support')
+ep = os.path.join(support, 'ep')
+
+
+def test_plugin_entrypoint_is_loadable():
+    epfile = os.path.join(ep, 'Some_plugin.egg-info', 'entry_points.txt')
+    assert EntryPoint.parse_map(open(epfile, 'r').readlines())
diff --git a/functional_tests/test_entrypoints.pyc b/functional_tests/test_entrypoints.pyc
new file mode 100644 (file)
index 0000000..94315b7
Binary files /dev/null and b/functional_tests/test_entrypoints.pyc differ
diff --git a/functional_tests/test_failuredetail_plugin$py.class b/functional_tests/test_failuredetail_plugin$py.class
new file mode 100644 (file)
index 0000000..36d2933
Binary files /dev/null and b/functional_tests/test_failuredetail_plugin$py.class differ
diff --git a/functional_tests/test_failuredetail_plugin.py b/functional_tests/test_failuredetail_plugin.py
new file mode 100644 (file)
index 0000000..284cf49
--- /dev/null
@@ -0,0 +1,50 @@
+import os
+import sys
+import unittest
+from nose.plugins.failuredetail import FailureDetail
+from nose.plugins.capture import Capture
+from nose.plugins import PluginTester
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class TestFailureDetail(PluginTester, unittest.TestCase):
+    activate = "-d"
+    args = ['-v']
+    plugins = [FailureDetail()]
+    suitepath = os.path.join(support, 'fdp')
+
+    def runTest(self):
+        print '*' * 70
+        print str(self.output)
+        print '*' * 70
+
+        expect = (
+        'AssertionError: a is not 4\n'
+        '    print "Hello"\n'
+        '    2 = 2\n'
+        '>>  assert 2 == 4, "a is not 4"')
+
+        assert expect in self.output
+
+
+class TestFailureDetailWithCapture(PluginTester, unittest.TestCase):
+    activate = "-d"
+    args = ['-v']
+    plugins = [FailureDetail(), Capture()]
+    suitepath = os.path.join(support, 'fdp/test_fdp_no_capt.py')
+
+    def runTest(self):
+        print '*' * 70
+        print str(self.output)
+        print '*' * 70
+
+        expect = (
+        'AssertionError: a is not 4\n'
+        '    print "Hello"\n'
+        '    2 = 2\n'
+        '>>  assert 2 == 4, "a is not 4"')
+
+        assert expect in self.output
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_failuredetail_plugin.pyc b/functional_tests/test_failuredetail_plugin.pyc
new file mode 100644 (file)
index 0000000..387d58e
Binary files /dev/null and b/functional_tests/test_failuredetail_plugin.pyc differ
diff --git a/functional_tests/test_generator_fixtures$py.class b/functional_tests/test_generator_fixtures$py.class
new file mode 100644 (file)
index 0000000..38db048
Binary files /dev/null and b/functional_tests/test_generator_fixtures$py.class differ
diff --git a/functional_tests/test_generator_fixtures.py b/functional_tests/test_generator_fixtures.py
new file mode 100644 (file)
index 0000000..3240141
--- /dev/null
@@ -0,0 +1,58 @@
+from nose.tools import eq_
+called = []
+
+def outer_setup():
+    called.append('outer_setup')
+
+def outer_teardown():
+    called.append('outer_teardown')
+
+def inner_setup():
+    called.append('inner_setup')
+
+def inner_teardown():
+    called.append('inner_teardown')
+
+def test_gen():
+    called[:] = []
+    for i in range(0, 5):
+        yield check, i
+        
+def check(i):
+    expect = ['outer_setup']
+    for x in range(0, i):
+        expect.append('inner_setup')
+        expect.append('inner_teardown')
+    expect.append('inner_setup')
+    eq_(called, expect)
+
+    
+test_gen.setup = outer_setup
+test_gen.teardown = outer_teardown
+check.setup = inner_setup
+check.teardown = inner_teardown
+
+
+class TestClass(object):
+    def setup(self):
+        print "setup called in", self
+        self.called = ['setup']
+
+    def teardown(self):
+        print "teardown called in", self
+        eq_(self.called, ['setup'])
+        self.called.append('teardown')
+
+    def test(self):
+        print "test called in", self
+        for i in range(0, 5):
+            yield self.check, i
+
+    def check(self, i):
+        print "check called in", self
+        expect = ['setup']
+        #for x in range(0, i):
+        #    expect.append('setup')
+        #    expect.append('teardown')
+        #expect.append('setup')
+        eq_(self.called, expect)
diff --git a/functional_tests/test_generator_fixtures.pyc b/functional_tests/test_generator_fixtures.pyc
new file mode 100644 (file)
index 0000000..314babf
Binary files /dev/null and b/functional_tests/test_generator_fixtures.pyc differ
diff --git a/functional_tests/test_id_plugin$py.class b/functional_tests/test_id_plugin$py.class
new file mode 100644 (file)
index 0000000..8c1e034
Binary files /dev/null and b/functional_tests/test_id_plugin$py.class differ
diff --git a/functional_tests/test_id_plugin.py b/functional_tests/test_id_plugin.py
new file mode 100644 (file)
index 0000000..7b3d39d
--- /dev/null
@@ -0,0 +1,261 @@
+import os
+import re
+import sys
+import tempfile
+import unittest
+
+from nose.plugins import PluginTester
+from nose.plugins.builtin import Doctest
+from nose.plugins.builtin import TestId
+from cPickle import dump, load
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+idfile = tempfile.mktemp()
+test_part = re.compile(r'(#\d+)? +([^(]+)')
+
+def teardown():
+     try:
+         os.remove(idfile)
+     except OSError:
+         pass
+
+class TestDiscoveryMode(PluginTester, unittest.TestCase):
+    activate = '--with-id'
+    plugins = [TestId()]
+    args = ['-v', '--id-file=%s' % idfile]
+    suitepath = os.path.join(support, 'idp')
+
+    def test_ids_added_to_output(self):
+        #print '>' * 70
+        #print str(self.output)
+        #print '<' * 70
+
+        for line in self.output:
+            if line.startswith('='):
+                break
+            if not line.strip():
+                continue
+            if 'test_gen' in line and not '(0,)' in line:
+                assert not line.startswith('#'), \
+                       "Generated test line '%s' should not have id" % line
+            else:
+                assert line.startswith('#'), \
+                       "Test line '%s' missing id" % line.strip()
+            
+    # test that id file is written
+    def test_id_file_contains_ids_seen(self):
+        assert os.path.exists(idfile)
+        fh = open(idfile, 'rb')
+        ids = load(fh)['ids']
+        fh.close()
+        assert ids
+        assert ids.keys()
+        self.assertEqual(map(int, ids.keys()), ids.keys())
+        assert ids.values()
+
+
+class TestLoadNamesMode(PluginTester, unittest.TestCase):
+    """NOTE that this test passing requires the previous test case to
+    be run! (Otherwise the ids file will not exist)
+    """
+    activate = '--with-id'
+    plugins = [TestId()]
+    # Not a typo: # is optional before ids
+    args = ['-v', '--id-file=%s' % idfile, '2', '#5']
+    suitepath = None
+
+    def makeSuite(self):
+        return None
+
+    def test_load_ids(self):
+        #print '#' * 70
+        #print str(self.output)
+        #print '#' * 70
+
+        for line in self.output:
+            if line.startswith('#'):
+                assert line.startswith('#2 ') or line.startswith('#5 '), \
+                       "Unexpected test line '%s'" % line
+        assert os.path.exists(idfile)
+        fh = open(idfile, 'rb')
+        ids = load(fh)
+        fh.close()
+        assert ids
+        assert ids.keys()
+        ids = ids['ids']
+        self.assertEqual(filter(lambda i: int(i), ids.keys()), ids.keys())
+        assert len(ids.keys()) > 2
+
+
+class TestLoadNamesMode_2(PluginTester, unittest.TestCase):
+    """NOTE that this test passing requires the previous test case to
+    be run! (Otherwise the ids file will not exist)
+
+    Tests that generators still only have id on one line
+    """
+    activate = '--with-id'
+    plugins = [TestId()]
+    args = ['-v', '--id-file=%s' % idfile, '9']
+    suitepath = None
+
+    def makeSuite(self):
+        return None
+
+    def test_load_ids(self):
+        #print '%' * 70
+        #print str(self.output)
+        #print '%' * 70
+
+        count = 0
+        for line in self.output:
+            if line.startswith('#'):
+                count += 1
+        self.assertEqual(count, 1)
+        teardown()
+
+
+class TestWithDoctest_1(PluginTester, unittest.TestCase):
+    activate = '--with-id'
+    plugins = [Doctest(), TestId()]
+    args = ['-v', '--id-file=%s' % idfile, '--with-doctest']
+    suitepath = os.path.join(support, 'idp')
+
+    def test_doctests_get_ids(self):
+        #print '>' * 70
+        #print str(self.output)
+        #print '>' * 70
+
+        last = None
+        for line in self.output:
+            if line.startswith('='):
+                break
+            if not line.strip():
+                continue
+            # assert line startswith # or test part matches last
+            m = test_part.match(line.rstrip())
+            assert m
+            idx, name = m.groups()
+            assert idx or last is None or name == last, \
+                   "Expected an id on line %s" % line.strip()
+            last = name
+            
+        fh = open(idfile, 'rb')
+        ids = load(fh)['ids']
+        fh.close()
+        for key, (file, mod, call) in ids.items():
+            assert mod != 'doctest', \
+                   "Doctest test was incorrectly identified as being part of "\
+                   "the doctest module itself (#%s)" % key
+
+
+class TestWithDoctest_2(PluginTester, unittest.TestCase):
+    activate = '--with-id'
+    plugins = [Doctest(), TestId()]
+    args = ['-v', '--id-file=%s' % idfile, '--with-doctest', '#2']
+    suitepath = None
+
+    def setUp(self):
+        sys.path.insert(0, os.path.join(support, 'idp'))
+        super(TestWithDoctest_2, self).setUp()
+
+    def tearDown(self):
+        sys.path.remove(os.path.join(support, 'idp'))
+        super(TestWithDoctest_2, self).tearDown()
+
+    def makeSuite(self):
+        return None
+
+    def test_load_ids_doctest(self):
+        print '*' * 70
+        print str(self.output)
+        print '*' * 70
+
+        assert 'Doctest: exm.add_one ... FAIL' in self.output
+        
+        count = 0
+        for line in self.output:
+            if line.startswith('#'):
+                count += 1
+        self.assertEqual(count, 1)
+        teardown()
+        
+
+class TestWithDoctestFileTests_1(PluginTester, unittest.TestCase):
+    activate = '--with-id'
+    plugins = [Doctest(), TestId()]
+    args = ['-v', '--id-file=%s' % idfile, '--with-doctest',
+            '--doctest-extension=.txt']
+    suitepath = os.path.join(support, 'dtt', 'docs')
+
+    def test_docfile_tests_get_ids(self):
+        print '>' * 70
+        print str(self.output)
+        print '>' * 70
+
+        last = None
+        for line in self.output:
+            if line.startswith('='):
+                break
+            # assert line startswith # or test part matches last
+            if not line.strip():
+                continue
+            m = test_part.match(line.rstrip())
+            assert m, "line %s does not match expected pattern" % line.strip()
+            idx, name = m.groups()
+            assert idx or last is None or name == last, \
+                   "Expected an id on line %s" % line.strip()
+            
+            last = name
+        fh = open(idfile, 'rb')
+        ids = load(fh)['ids']
+        fh.close()
+        for key, (file, mod, call) in ids.items():
+            assert mod != 'doctest', \
+                   "Doctest test was incorrectly identified as being part of "\
+                   "the doctest module itself (#%s)" % key    
+
+
+class TestWithDoctestFileTests_2(PluginTester, unittest.TestCase):
+    activate = '--with-id'
+    plugins = [Doctest(), TestId()]
+    args = ['-v', '--id-file=%s' % idfile, '--with-doctest',
+            '--doctest-extension=.txt', '2']
+    suitepath = None
+
+    def setUp(self):
+        sys.path.insert(0, os.path.join(support, 'dtt', 'docs'))
+        super(TestWithDoctestFileTests_2, self).setUp()
+
+    def tearDown(self):
+        sys.path.remove(os.path.join(support, 'dtt', 'docs'))
+        super(TestWithDoctestFileTests_2, self).tearDown()
+
+    def makeSuite(self):
+        return None
+
+    def test_load_from_name_id_docfile_test(self):
+        print '*' * 70
+        print str(self.output)
+        print '*' * 70
+
+        assert 'Doctest: errdoc.txt ... FAIL' in self.output
+        
+        count = 0
+        for line in self.output:
+            if line.startswith('#'):
+                count += 1
+        assert count == 1
+        teardown()
+        
+        
+if __name__ == '__main__':
+    import logging
+    logging.basicConfig()
+    l = logging.getLogger('nose.plugins.testid')
+    l.setLevel(logging.DEBUG)
+    
+    try:
+        unittest.main()
+    finally:
+        teardown()
+    
diff --git a/functional_tests/test_id_plugin.pyc b/functional_tests/test_id_plugin.pyc
new file mode 100644 (file)
index 0000000..7ba28e9
Binary files /dev/null and b/functional_tests/test_id_plugin.pyc differ
diff --git a/functional_tests/test_importer$py.class b/functional_tests/test_importer$py.class
new file mode 100644 (file)
index 0000000..84c0ec7
Binary files /dev/null and b/functional_tests/test_importer$py.class differ
diff --git a/functional_tests/test_importer.py b/functional_tests/test_importer.py
new file mode 100644 (file)
index 0000000..c24fdcf
--- /dev/null
@@ -0,0 +1,168 @@
+import os
+import sys
+import unittest
+from nose.importer import Importer
+
+
+class TestImporter(unittest.TestCase):
+
+    def setUp(self):
+        self.dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
+                                                 'support'))
+        self.imp = Importer()
+        self._mods = sys.modules.copy()
+        self._path = sys.path[:]
+        sys.modules.pop('mod', None)
+        sys.modules.pop('pak', None)
+        sys.modules.pop('pak.mod', None)
+        sys.modules.pop('pak.sub', None)
+        
+    def tearDown(self):
+        to_del = [ m for m in sys.modules.keys() if
+                   m not in self._mods ]
+        if to_del:
+            for mod in to_del:
+                del sys.modules[mod]
+        sys.modules.update(self._mods)
+        sys.path = self._path[:]
+
+    def test_import_from_dir(self):
+        imp = self.imp
+
+        d1 = os.path.join(self.dir, 'dir1')
+        d2 = os.path.join(self.dir, 'dir2')
+        
+        # simple name        
+        m1 = imp.importFromDir(d1, 'mod')
+        m2 = imp.importFromDir(d2, 'mod')
+        self.assertNotEqual(m1, m2)
+        self.assertNotEqual(m1.__file__, m2.__file__)
+
+        # dotted name
+        p1 = imp.importFromDir(d1, 'pak.mod')
+        p2 = imp.importFromDir(d2, 'pak.mod')
+        self.assertNotEqual(p1, p2)
+        self.assertNotEqual(p1.__file__, p2.__file__)
+
+    def test_import_from_path(self):
+        imp = self.imp
+
+        jn = os.path.join
+        d1 = jn(self.dir, 'dir1')
+        d2 = jn(self.dir, 'dir2')
+        
+        # simple name        
+        m1 = imp.importFromPath(jn(d1, 'mod.py'), 'mod')
+        m2 = imp.importFromPath(jn(d2, 'mod.py'), 'mod')
+        self.assertNotEqual(m1, m2)
+        self.assertNotEqual(m1.__file__, m2.__file__)
+
+        # dotted name
+        p1 = imp.importFromPath(jn(d1, 'pak', 'mod.py'), 'pak.mod')
+        p2 = imp.importFromPath(jn(d2, 'pak', 'mod.py'), 'pak.mod')
+        self.assertNotEqual(p1, p2)
+        self.assertNotEqual(p1.__file__, p2.__file__)
+
+        # simple name -- package
+        sp1 = imp.importFromPath(jn(d1, 'pak'), 'pak')
+        sp2 = imp.importFromPath(jn(d2, 'pak'), 'pak')
+        self.assertNotEqual(sp1, sp2)
+        assert sp1.__path__
+        assert sp2.__path__
+        self.assertNotEqual(sp1.__path__, sp2.__path__)
+
+        # dotted name -- package
+        dp1 = imp.importFromPath(jn(d1, 'pak', 'sub'), 'pak.sub')
+        dp2 = imp.importFromPath(jn(d2, 'pak', 'sub'), 'pak.sub')
+        self.assertNotEqual(dp1, dp2)
+        assert dp1.__path__
+        assert dp2.__path__
+        self.assertNotEqual(dp1.__path__, dp2.__path__)
+
+    def test_import_sets_intermediate_modules(self):
+        imp = self.imp
+        path = os.path.join(self.dir,
+                            'package2', 'test_pak', 'test_sub', 'test_mod.py')
+        mod = imp.importFromPath(path, 'test_pak.test_sub.test_mod')
+        print mod, dir(mod)
+        assert 'test_pak' in sys.modules, 'test_pak was not imported?'
+        test_pak = sys.modules['test_pak']
+        assert hasattr(test_pak, 'test_sub'), "test_pak.test_sub was not set"
+        
+    def test_cached_no_reload(self):
+        imp = self.imp
+        d1 = os.path.join(self.dir, 'dir1')
+        m1 = imp.importFromDir(d1, 'mod')
+        m2 = imp.importFromDir(d1, 'mod')        
+        assert m1 is m2, "%s is not %s" % (m1, m2)
+
+    def test_cached_no_reload_dotted(self):
+        imp = self.imp
+        d1 = os.path.join(self.dir, 'dir1')
+        p1 = imp.importFromDir(d1, 'pak.mod')
+        p2 = imp.importFromDir(d1, 'pak.mod')
+        assert p1 is p2, "%s is not %s" % (p1, p2)
+
+    def test_import_sets_sys_modules(self):
+        imp = self.imp
+        d1 = os.path.join(self.dir, 'dir1')
+        p1 = imp.importFromDir(d1, 'pak.mod')
+        assert sys.modules['pak.mod'] is p1, "pak.mod not in sys.modules"
+        assert sys.modules['pak'], "pak not in sys.modules"
+        assert sys.modules['pak'].mod is p1, \
+               "sys.modules['pak'].mod is not the module we loaded"
+
+    def test_failed_import_raises_import_error(self):
+        imp = self.imp
+        def bad_import():
+            imp.importFromPath(self.dir, 'no.such.module')
+        self.assertRaises(ImportError, bad_import)
+
+    def test_sys_modules_same_path_no_reload(self):
+        imp = self.imp
+
+        d1 = os.path.join(self.dir, 'dir1')
+        d2 = os.path.join(self.dir, 'dir2')
+        sys.path.insert(0, d1)
+        mod_sys_imported = __import__('mod')
+        mod_nose_imported = imp.importFromDir(d1, 'mod')
+        assert mod_nose_imported is mod_sys_imported, \
+               "nose reimported a module in sys.modules from the same path"
+
+        mod_nose_imported2 = imp.importFromDir(d2, 'mod')
+        assert mod_nose_imported2 != mod_sys_imported, \
+               "nose failed to reimport same name, different dir"
+
+    def test_import_pkg_from_path_fpw(self):
+        imp = self.imp
+        imp.config.firstPackageWins = True
+        jn = os.path.join
+        d1 = jn(self.dir, 'dir1')
+        d2 = jn(self.dir, 'dir2')
+        
+        # dotted name
+        p1 = imp.importFromPath(jn(d1, 'pak', 'mod.py'), 'pak.mod')
+        p2 = imp.importFromPath(jn(d2, 'pak', 'mod.py'), 'pak.mod')
+        self.assertEqual(p1, p2)
+        self.assertEqual(p1.__file__, p2.__file__)
+
+        # simple name -- package
+        sp1 = imp.importFromPath(jn(d1, 'pak'), 'pak')
+        sp2 = imp.importFromPath(jn(d2, 'pak'), 'pak')
+        self.assertEqual(sp1, sp2)
+        assert sp1.__path__
+        assert sp2.__path__
+        self.assertEqual(sp1.__path__, sp2.__path__)
+
+        # dotted name -- package
+        dp1 = imp.importFromPath(jn(d1, 'pak', 'sub'), 'pak.sub')
+        dp2 = imp.importFromPath(jn(d2, 'pak', 'sub'), 'pak.sub')
+        self.assertEqual(dp1, dp2)
+        assert dp1.__path__
+        assert dp2.__path__
+        self.assertEqual(dp1.__path__, dp2.__path__)
+        
+if __name__ == '__main__':
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/functional_tests/test_importer.pyc b/functional_tests/test_importer.pyc
new file mode 100644 (file)
index 0000000..258a612
Binary files /dev/null and b/functional_tests/test_importer.pyc differ
diff --git a/functional_tests/test_isolate_plugin$py.class b/functional_tests/test_isolate_plugin$py.class
new file mode 100644 (file)
index 0000000..0c8c05a
Binary files /dev/null and b/functional_tests/test_isolate_plugin$py.class differ
diff --git a/functional_tests/test_isolate_plugin.py b/functional_tests/test_isolate_plugin.py
new file mode 100644 (file)
index 0000000..087dcaa
--- /dev/null
@@ -0,0 +1,57 @@
+import os
+import sys
+import unittest
+from nose.plugins.isolate import IsolationPlugin
+from nose.plugins import PluginTester
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class TestDiscovery(PluginTester, unittest.TestCase):
+    activate = '--with-isolation'
+    args = ['-v']
+    plugins = [IsolationPlugin()]
+    suitepath = os.path.join(support, 'ipt')
+    
+    def runTest(self):
+        print str(self.output)
+
+        for line in self.output:
+            if not line.strip():
+                continue
+            if line.startswith('-'):
+                break
+            assert line.strip().endswith('ok'), \
+                   "Failed test: %s" % line.strip()
+
+
+class TestLoadFromNames(PluginTester, unittest.TestCase):
+    activate = '--with-isolation'
+    args = ['-v', 'test1/tests.py', 'test2/tests.py']
+    plugins = [IsolationPlugin()]
+    suitepath = None
+
+    def setUp(self):
+        self._dir = os.getcwd()
+        os.chdir(os.path.join(support, 'ipt'))
+        super(TestLoadFromNames, self).setUp()
+        
+    def tearDown(self):
+        os.chdir(self._dir)
+        super(TestLoadFromNames, self).tearDown()
+
+    def makeSuite(self):
+        return None
+    
+    def runTest(self):
+        print str(self.output)
+
+        for line in self.output:
+            if not line.strip():
+                continue
+            if line.startswith('-'):
+                break
+            assert line.strip().endswith('ok'), \
+                   "Failed test: %s" % line.strip()
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_isolate_plugin.pyc b/functional_tests/test_isolate_plugin.pyc
new file mode 100644 (file)
index 0000000..2361b21
Binary files /dev/null and b/functional_tests/test_isolate_plugin.pyc differ
diff --git a/functional_tests/test_issue120/support/some_test$py.class b/functional_tests/test_issue120/support/some_test$py.class
new file mode 100644 (file)
index 0000000..6cb8e68
Binary files /dev/null and b/functional_tests/test_issue120/support/some_test$py.class differ
diff --git a/functional_tests/test_issue120/support/some_test.py b/functional_tests/test_issue120/support/some_test.py
new file mode 100644 (file)
index 0000000..9947266
--- /dev/null
@@ -0,0 +1,3 @@
+def some_test():
+    pass
+
diff --git a/functional_tests/test_issue120/support/some_test.pyc b/functional_tests/test_issue120/support/some_test.pyc
new file mode 100644 (file)
index 0000000..4872b18
Binary files /dev/null and b/functional_tests/test_issue120/support/some_test.pyc differ
diff --git a/functional_tests/test_issue120/test_named_test_with_doctest.rst b/functional_tests/test_issue120/test_named_test_with_doctest.rst
new file mode 100644 (file)
index 0000000..f05deb7
--- /dev/null
@@ -0,0 +1,25 @@
+Naming a non-existent test using the colon syntax (foo.py:my_test)
+with plugin doctests enabled used to cause a failure with a ValueError
+from module doctest, losing the original failure (failure to find the
+test).
+
+    >>> import os
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> from nose.plugins.doctests import Doctest
+
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> test_name = os.path.join(support, 'some_test.py') + ':nonexistent'
+    >>> run(argv=['nosetests', '--with-doctest', test_name],
+    ...     plugins=[Doctest()])
+    E
+    ======================================================================
+    ERROR: Failure: ValueError (No such test nonexistent)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    ValueError: No such test nonexistent
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (errors=1)
diff --git a/functional_tests/test_issue_072$py.class b/functional_tests/test_issue_072$py.class
new file mode 100644 (file)
index 0000000..5c8f645
Binary files /dev/null and b/functional_tests/test_issue_072$py.class differ
diff --git a/functional_tests/test_issue_072.py b/functional_tests/test_issue_072.py
new file mode 100644 (file)
index 0000000..3848f6f
--- /dev/null
@@ -0,0 +1,45 @@
+import os
+import sys
+import unittest
+
+from nose.plugins import PluginTester
+from nose.plugins.builtin import FailureDetail, Capture
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+
+class TestFailureDetailWorks(PluginTester, unittest.TestCase):
+    activate = '-d'
+    plugins = [FailureDetail()]
+    args = ['-v']
+    suitepath = os.path.join(support, 'issue072')
+
+    def test_assert_info_in_output(self):
+        print
+        print '!' * 70
+        print str(self.output)
+        print '!' * 70
+        print
+        assert '>>  assert 4 == 2' in str(self.output)
+
+class TestFailureDetailWorksWhenChained(PluginTester, unittest.TestCase):
+    activate = '-d'
+    plugins = [FailureDetail(), Capture()]
+    args = ['-v']
+    suitepath = os.path.join(support, 'issue072')
+
+    def test_assert_info_and_capt_stdout_in_output(self):
+        out = str(self.output)
+        print
+        print 'x' * 70
+        print out
+        print 'x' * 70
+        print
+        
+        assert '>>  assert 4 == 2' in out, \
+               "Assert info not found in chained output"
+        assert 'something' in out, \
+               "Captured stdout not found in chained output"
+        
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_issue_072.pyc b/functional_tests/test_issue_072.pyc
new file mode 100644 (file)
index 0000000..7f3ac45
Binary files /dev/null and b/functional_tests/test_issue_072.pyc differ
diff --git a/functional_tests/test_issue_082$py.class b/functional_tests/test_issue_082$py.class
new file mode 100644 (file)
index 0000000..5861e10
Binary files /dev/null and b/functional_tests/test_issue_082$py.class differ
diff --git a/functional_tests/test_issue_082.py b/functional_tests/test_issue_082.py
new file mode 100644 (file)
index 0000000..06fa019
--- /dev/null
@@ -0,0 +1,74 @@
+import os
+import re
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+import sys
+import unittest
+
+from nose.plugins import Plugin, PluginTester
+from nose.plugins.builtin import FailureDetail, Capture, Doctest
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+
+class IncludeUnderscoreFilesPlugin(Plugin):
+
+    # Note that this is purely for purposes of testing nose itself, and is
+    # not intended to be a useful plugin.  In particular, the rules it
+    # applies for _*.py files differ from the nose defaults (e.g. the
+    # --testmatch option is ignored).
+
+    name = "underscorefiles"
+
+    def wantFile(self, file):
+        base = os.path.basename(file)
+        dummy, ext = os.path.splitext(base)
+        pysrc = ext == '.py'
+        if pysrc and os.path.basename(file).startswith("_"):
+            return True
+
+    def wantDirectory(self, dirname):
+        if os.path.basename(dirname).startswith("_"):
+            return True
+
+
+class TestIncludeUnderscoreFiles(PluginTester, unittest.TestCase):
+    activate = '--with-underscorefiles'
+    plugins = [IncludeUnderscoreFilesPlugin(), Doctest()]
+    args = ['-v', '--with-doctest']
+    suitepath = os.path.join(support, 'issue082')
+    ignoreFiles = (re.compile(r'^\.'),
+                   # we want _*.py, but don't want e.g. __init__.py, since that
+                   # appears to cause infinite recursion at the moment
+                   re.compile(r'^__'),
+                   re.compile(r'^setup\.py$')
+                   )
+
+    def test_assert_info_in_output(self):
+        print self.output
+        # In future, all four test cases will be run.  Backwards-compatibility
+        # means that can't be done in nose 0.10.
+        assert '_mypackage._eggs' not in str(self.output)
+        assert '_mypackage.bacon' not in str(self.output)
+        assert 'Doctest: mypublicpackage._foo ... FAIL' in str(self.output)
+        assert 'Doctest: mypublicpackage.bar ... FAIL' in str(self.output)
+
+
+class TestExcludeUnderscoreFilesByDefault(PluginTester, unittest.TestCase):
+    activate = '-v'
+    plugins = [Doctest()]
+    args = ['--with-doctest']
+    suitepath = os.path.join(support, 'issue082')
+
+    def test_assert_info_in_output(self):
+        print self.output
+        assert '_mypackage._eggs' not in str(self.output)
+        assert '_mypackage.bacon' not in str(self.output)
+        assert 'mypublicpackage._foo' not in str(self.output)
+        assert 'Doctest: mypublicpackage.bar ... FAIL' in str(self.output)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_issue_082.pyc b/functional_tests/test_issue_082.pyc
new file mode 100644 (file)
index 0000000..5304403
Binary files /dev/null and b/functional_tests/test_issue_082.pyc differ
diff --git a/functional_tests/test_issue_408$py.class b/functional_tests/test_issue_408$py.class
new file mode 100644 (file)
index 0000000..039b42b
Binary files /dev/null and b/functional_tests/test_issue_408$py.class differ
diff --git a/functional_tests/test_issue_408.py b/functional_tests/test_issue_408.py
new file mode 100644 (file)
index 0000000..17ea65c
--- /dev/null
@@ -0,0 +1,25 @@
+import os
+import unittest
+
+from nose.plugins import Plugin, PluginTester
+#from nose.plugins.builtin import FailureDetail, Capture, Doctest
+
+support = os.path.join(os.path.dirname(__file__), 'support', 'issue408')
+
+class TestIssue408(PluginTester, unittest.TestCase):
+    args = ['--where='+support, 'test:testa.test1', 'test:testa.test2', 'test:testb.test1', 'test:testb.test2']
+    activate = "-v"
+
+    def makeSuite(self):
+        # make PluginTester happy, because we don't specify suitepath, we
+        # have to implement this function
+        return None
+
+    def test_no_failure(self):
+        output = str(self.output)
+        assert 'FAIL:' not in output
+        assert 'AssertionError' not in output
+        assert 'OK' in output
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_issue_408.pyc b/functional_tests/test_issue_408.pyc
new file mode 100644 (file)
index 0000000..a30719d
Binary files /dev/null and b/functional_tests/test_issue_408.pyc differ
diff --git a/functional_tests/test_load_tests_from_test_case$py.class b/functional_tests/test_load_tests_from_test_case$py.class
new file mode 100644 (file)
index 0000000..f5db34e
Binary files /dev/null and b/functional_tests/test_load_tests_from_test_case$py.class differ
diff --git a/functional_tests/test_load_tests_from_test_case.py b/functional_tests/test_load_tests_from_test_case.py
new file mode 100644 (file)
index 0000000..42f8563
--- /dev/null
@@ -0,0 +1,56 @@
+"""
+Tests that plugins can override loadTestsFromTestCase
+"""
+import os
+import unittest
+from nose import loader
+from nose.plugins import PluginTester
+from nose.plugins.base import Plugin
+
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+
+class NoFixturePlug(Plugin):
+    enabled = True
+
+    def options(self, parser, env):
+        print "options"        
+        pass
+    
+    def configure(self, options, conf):
+        print "configure"
+        pass
+
+    def loadTestsFromTestCase(self, testCaseClass):
+        print "Called!"
+        class Derived(testCaseClass):
+            def setUp(self):
+                pass
+            def tearDown(self):
+                pass
+        # must use nose loader here because the default loader in 2.3
+        # won't load tests from base classes
+        l = loader.TestLoader()
+        return l.loadTestsFromTestCase(Derived)
+
+
+class TestLoadTestsFromTestCaseHook(PluginTester, unittest.TestCase):
+
+    activate = '-v'
+    args = []
+    plugins = [NoFixturePlug()]
+    suitepath = os.path.join(support, 'ltftc')
+
+    def runTest(self):
+        expect = [
+            'test_value (%s.Derived) ... ERROR' % __name__,
+            'test_value (tests.Tests) ... ok']
+        print str(self.output)
+        for line in self.output:
+            if expect:
+                self.assertEqual(line.strip(), expect.pop(0))
+                
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_load_tests_from_test_case.pyc b/functional_tests/test_load_tests_from_test_case.pyc
new file mode 100644 (file)
index 0000000..0044ab8
Binary files /dev/null and b/functional_tests/test_load_tests_from_test_case.pyc differ
diff --git a/functional_tests/test_loader$py.class b/functional_tests/test_loader$py.class
new file mode 100644 (file)
index 0000000..da4d7bd
Binary files /dev/null and b/functional_tests/test_loader$py.class differ
diff --git a/functional_tests/test_loader.py b/functional_tests/test_loader.py
new file mode 100644 (file)
index 0000000..6f73559
--- /dev/null
@@ -0,0 +1,451 @@
+import os
+import sys
+import unittest
+from difflib import ndiff
+from cStringIO import StringIO
+
+from nose.config import Config
+from nose.plugins.manager import PluginManager
+from nose.plugins.skip import Skip
+from nose import loader
+from nose import suite
+from nose.result import _TextTestResult
+try:
+    # 2.7+
+    from unittest.runner import _WritelnDecorator
+except ImportError:
+    from unittest import _WritelnDecorator
+
+support = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
+
+class TestNoseTestLoader(unittest.TestCase):  # functional tests of nose.loader.TestLoader name resolution and fixture ordering
+
+    def setUp(self):  # snapshot sys.modules; install the tree-printing suite class for context debugging
+        self._mods = sys.modules.copy()
+        suite.ContextSuiteFactory.suiteClass = TreePrintContextSuite
+
+    def tearDown(self):  # drop modules imported during the test; restore the real ContextSuite
+        to_del = [ m for m in sys.modules.keys() if
+                   m not in self._mods ]
+        if to_del:
+            for mod in to_del:
+                del sys.modules[mod]
+        sys.modules.update(self._mods)
+        suite.ContextSuiteFactory.suiteClass = suite.ContextSuite
+
+    def test_load_from_name_file(self):  # a relative file path loads that file's tests
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package1')
+        l = loader.TestLoader(workingDir=wd)
+
+        file_suite = l.loadTestsFromName('tests/test_example_function.py')
+        file_suite(res)
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+
+    def test_load_from_name_dot(self):  # '.' loads the whole working directory
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package1')
+        l = loader.TestLoader(workingDir=wd)
+        dir_suite = l.loadTestsFromName('.')
+        dir_suite(res)
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+
+    def test_load_from_name_file_callable(self):  # file.py:callable selects exactly one test
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package1')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromName(  # NOTE(review): local 'suite' shadows the imported nose.suite module (here and below)
+            'tests/test_example_function.py:test_times_two')
+        suite(res)
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        self.assertEqual(res.testsRun, 1)
+
+    def test_fixture_context(self):  # full-directory run: package/module/class fixtures fire once per context, in order
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package2')
+        l = loader.TestLoader(workingDir=wd)
+        dir_suite = l.loadTestsFromName('.')
+        dir_suite(res)
+
+        m = sys.modules['test_pak']
+        print "test pak state", m.state
+
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        self.assertEqual(res.testsRun, 5)
+
+        # Expected order of calls
+        expect = ['test_pak.setup',
+                  'test_pak.test_mod.setup',
+                  'test_pak.test_mod.test_add',
+                  'test_pak.test_mod.test_minus',
+                  'test_pak.test_mod.teardown',
+                  'test_pak.test_sub.setup',
+                  'test_pak.test_sub.test_mod.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.setup_class',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_div',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_two_two',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown_class',
+                  'test_pak.test_sub.test_mod.test',
+                  'test_pak.test_sub.test_mod.teardown',
+                  'test_pak.test_sub.teardown',
+                  'test_pak.teardown']
+        self.assertEqual(len(m.state), len(expect))
+        for item in m.state:
+            self.assertEqual(item, expect.pop(0))
+
+    def test_fixture_context_name_is_module(self):  # naming a module still runs its package fixtures around it
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package2')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromName('test_pak.test_mod')
+        suite(res)
+
+        assert 'test_pak' in sys.modules, \
+               "Context did not load test_pak"
+        m = sys.modules['test_pak']
+        print "test pak state", m.state
+        expect = ['test_pak.setup',
+                  'test_pak.test_mod.setup',
+                  'test_pak.test_mod.test_add',
+                  'test_pak.test_mod.test_minus',
+                  'test_pak.test_mod.teardown',
+                  'test_pak.teardown']
+        self.assertEqual(len(m.state), len(expect))
+        for item in m.state:
+            self.assertEqual(item, expect.pop(0))
+
+    def test_fixture_context_name_is_test_function(self):  # naming one function runs only its enclosing fixtures
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package2')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromName('test_pak.test_mod:test_add')
+        suite(res)
+
+        assert 'test_pak' in sys.modules, \
+               "Context did not load test_pak"
+        m = sys.modules['test_pak']
+        print "test pak state", m.state
+        expect = ['test_pak.setup',
+                  'test_pak.test_mod.setup',
+                  'test_pak.test_mod.test_add',
+                  'test_pak.test_mod.teardown',
+                  'test_pak.teardown']
+        self.assertEqual(len(m.state), len(expect))
+        for item in m.state:
+            self.assertEqual(item, expect.pop(0))
+
+    def test_fixture_context_name_is_test_class(self):  # naming a class runs class-level fixtures once around its tests
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package2')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromName(
+            'test_pak.test_sub.test_mod:TestMaths')
+        suite(res)
+
+        assert 'test_pak' in sys.modules, \
+               "Context did not load test_pak"
+        m = sys.modules['test_pak']
+        # print "test pak state", m.state
+        expect = ['test_pak.setup',
+                  'test_pak.test_sub.setup',
+                  'test_pak.test_sub.test_mod.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.setup_class',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_div',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_two_two',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown_class',
+                  'test_pak.test_sub.test_mod.teardown',
+                  'test_pak.test_sub.teardown',
+                  'test_pak.teardown']
+        self.assertEqual(m.state, expect, diff(expect, m.state))
+
+    def test_fixture_context_name_is_test_class_test(self):  # naming one method still wraps it in class fixtures
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package2')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromName(
+            'test_pak.test_sub.test_mod:TestMaths.test_div')
+        suite(res)
+
+        assert 'test_pak' in sys.modules, \
+               "Context not load test_pak"
+        m = sys.modules['test_pak']
+        print "test pak state", m.state
+        expect = ['test_pak.setup',
+                  'test_pak.test_sub.setup',
+                  'test_pak.test_sub.test_mod.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.setup_class',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_div',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown_class',
+                  'test_pak.test_sub.test_mod.teardown',
+                  'test_pak.test_sub.teardown',
+                  'test_pak.teardown']
+        self.assertEqual(m.state, expect, diff(expect, m.state))
+
+    def test_fixture_context_multiple_names(self):  # shared ancestor fixtures run once across multiple names
+        res = unittest.TestResult()
+        wd = os.path.join(support, 'package2')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromNames(
+            ['test_pak.test_sub.test_mod:TestMaths.test_div',
+             'test_pak.test_sub.test_mod:TestMaths.test_two_two',
+             'test_pak.test_mod:test_add'])
+        print suite
+        suite(res)
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        assert 'test_pak' in sys.modules, \
+               "Context not load test_pak"
+        m = sys.modules['test_pak']
+        print "test pak state", m.state
+        expect = ['test_pak.setup',
+                  'test_pak.test_sub.setup',
+                  'test_pak.test_sub.test_mod.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.setup_class',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_div',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.setup',
+                  'test_pak.test_sub.test_mod.TestMaths.test_two_two',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown',
+                  'test_pak.test_sub.test_mod.TestMaths.teardown_class',
+                  'test_pak.test_sub.test_mod.teardown',
+                  'test_pak.test_sub.teardown',
+                  'test_pak.test_mod.setup',
+                  'test_pak.test_mod.test_add',
+                  'test_pak.test_mod.teardown',
+                  'test_pak.teardown']
+        self.assertEqual(m.state, expect, diff(expect, m.state))
+
+    def test_fixture_context_multiple_names_some_common_ancestors(self):  # fixtures shared by some names run once; others run per package
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, 2)
+        wd = os.path.join(support, 'ltfn')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromNames(
+            ['test_pak1.test_mod',
+             'test_pak2:test_two_two',
+             'test_pak1:test_one_one'])
+        print suite
+        suite(res)
+        res.printErrors()
+        print stream.getvalue()
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        assert 'state' in sys.modules, \
+               "Context not load state module"
+        m = sys.modules['state']
+        print "state", m.called
+
+        expect = ['test_pak1.setup',
+                  'test_pak1.test_mod.setup',
+                  'test_pak1.test_mod.test_one_mod_one',
+                  'test_pak1.test_mod.teardown',
+                  'test_pak1.test_one_one',
+                  'test_pak1.teardown',
+                  'test_pak2.setup',
+                  'test_pak2.test_two_two',
+                  'test_pak2.teardown']
+        self.assertEqual(m.called, expect, diff(expect, m.called))
+
+    def test_fixture_context_multiple_names_no_common_ancestors(self):  # unrelated names each get their own fixture cycle
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, 2)
+        wd = os.path.join(support, 'ltfn')
+        l = loader.TestLoader(workingDir=wd)
+        suite = l.loadTestsFromNames(
+            ['test_pak1.test_mod',
+             'test_pak2:test_two_two',
+             'test_mod'])
+        print suite
+        suite(res)
+        res.printErrors()
+        print stream.getvalue()
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        assert 'state' in sys.modules, \
+               "Context not load state module"
+        m = sys.modules['state']
+        print "state", m.called
+
+        expect = ['test_pak1.setup',
+                  'test_pak1.test_mod.setup',
+                  'test_pak1.test_mod.test_one_mod_one',
+                  'test_pak1.test_mod.teardown',
+                  'test_pak1.teardown',
+                  'test_pak2.setup',
+                  'test_pak2.test_two_two',
+                  'test_pak2.teardown',
+                  'test_mod.setup',
+                  'test_mod.test_mod',
+                  'test_mod.teardown']
+        self.assertEqual(m.called, expect, diff(expect, m.called))
+    
+    def test_mod_setup_fails_no_tests_run(self):  # a failing module setup yields errors but zero tests run
+        ctx = os.path.join(support, 'ctx')
+        l = loader.TestLoader(workingDir=ctx)
+        suite = l.loadTestsFromName('mod_setup_fails.py')
+
+        res = unittest.TestResult()
+        suite(res)
+
+        assert res.errors
+        assert not res.failures, res.failures
+        assert res.testsRun == 0, \
+               "Expected to run 0 tests but ran %s" % res.testsRun
+
+    def test_mod_setup_skip_no_tests_run_no_errors(self):  # SkipTest in module setup skips the module without errors
+        config = Config(plugins=PluginManager(plugins=[Skip()]))
+        ctx = os.path.join(support, 'ctx')
+        l = loader.TestLoader(workingDir=ctx, config=config)
+        suite = l.loadTestsFromName('mod_setup_skip.py')
+
+        res = unittest.TestResult()
+        suite(res)
+
+        assert not suite.was_setup, "Suite setup did not fail"
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        assert res.skipped
+        assert res.testsRun == 0, \
+               "Expected to run 0 tests but ran %s" % res.testsRun
+
+    def test_mod_import_skip_one_test_no_errors(self):  # SkipTest at import time is counted as one clean test
+        config = Config(plugins=PluginManager(plugins=[Skip()]))
+        ctx = os.path.join(support, 'ctx')
+        l = loader.TestLoader(workingDir=ctx, config=config)
+        suite = l.loadTestsFromName('mod_import_skip.py')
+
+        res = unittest.TestResult()
+        suite(res)
+
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        assert res.testsRun == 1, \
+               "Expected to run 1 tests but ran %s" % res.testsRun
+
+    def test_failed_import(self):  # a missing file becomes a single error "test"
+        ctx = os.path.join(support, 'ctx')
+        l = loader.TestLoader(workingDir=ctx)
+        suite = l.loadTestsFromName('no_such_module.py')
+
+        res = _TextTestResult(
+            stream=_WritelnDecorator(sys.stdout),
+            descriptions=0, verbosity=1)
+        suite(res)
+
+        print res.errors
+        res.printErrors()
+        assert res.errors, "Expected errors but got none"
+        assert not res.failures, res.failures
+        assert res.testsRun == 1, \
+               "Expected to run 1 tests but ran %s" % res.testsRun
+
+    def test_failed_import_module_name(self):  # a missing module name errors with ImportError as the recorded class
+        ctx = os.path.join(support, 'ctx')
+        l = loader.TestLoader(workingDir=ctx)
+        suite = l.loadTestsFromName('no_such_module')
+
+        res = _TextTestResult(
+            stream=_WritelnDecorator(sys.stdout),
+            descriptions=0, verbosity=1)
+        suite(res)
+        print res.errors
+        res.printErrors()
+        assert res.errors, "Expected errors but got none"
+        assert not res.failures, res.failures
+        err = res.errors[0][0].test.exc_class
+        assert err is ImportError, \
+            "Expected import error, got %s" % err
+
+    def test_load_nonsense_name(self):  # an unparseable name produces errors, not a crash
+        ctx = os.path.join(support, 'ctx')
+        l = loader.TestLoader(workingDir=ctx)
+        suite = l.loadTestsFromName('fred!')
+
+        res = _TextTestResult(
+            stream=_WritelnDecorator(sys.stdout),
+            descriptions=0, verbosity=1)
+        suite(res)
+        print res.errors
+        assert res.errors, "Expected errors but got none"
+        assert not res.failures, res.failures
+
+    def test_generator_with_closure(self):
+        """Test that a generator test can employ a closure
+
+        Issue #3. If the generator binds early, the last value
+        of the closure will be seen for each generated test and
+        the tests will fail.
+        """
+        gen = os.path.join(support, 'gen')
+        l = loader.TestLoader(workingDir=gen)
+        suite = l.loadTestsFromName('test')
+        res = _TextTestResult(
+            stream=_WritelnDecorator(sys.stdout),
+            descriptions=0, verbosity=1)
+        suite(res)
+        assert not res.errors
+        self.assertEqual(res.testsRun, 5)
+
+    def test_issue_269(self):
+        """Test classes that raise exceptions in __init__ do not stop test run
+        """
+        wdir = os.path.join(support, 'issue269')
+        l = loader.TestLoader(workingDir=wdir)
+        suite = l.loadTestsFromName('test_bad_class')
+        res = _TextTestResult(
+            stream=_WritelnDecorator(sys.stdout),
+            descriptions=0, verbosity=1)
+        suite(res)
+        print res.errors
+        self.assertEqual(len(res.errors), 1)
+        assert 'raise Exception("pow")' in res.errors[0][1]
+        
+        
+# used for comparing lists
+def diff(a, b):  # ndiff of a vs b with unchanged lines filtered out, for assertion messages
+    return '\n' + '\n'.join([ l for l in ndiff(a, b)
+                              if not l.startswith('  ') ])
+
+
+# used for context debugging
+class TreePrintContextSuite(suite.ContextSuite):  # ContextSuite that prints an indented setup/teardown tree
+    indent = ''
+
+    def setUp(self):  # announce entry, run the real setup, then deepen the shared indent
+        print self, 'setup -->'
+        suite.ContextSuite.setUp(self)
+        TreePrintContextSuite.indent += '  '
+
+    def tearDown(self):  # shallow the indent first; always announce exit even if teardown raises
+        TreePrintContextSuite.indent = TreePrintContextSuite.indent[:-2]
+        try:
+            suite.ContextSuite.tearDown(self)
+        finally:
+            print self, 'teardown <--'
+    def __repr__(self):  # current indent plus the context's name (falls back to the context itself)
+        
+        return '%s<%s>' % (self.indent,
+                           getattr(self.context, '__name__', self.context))
+    __str__ = __repr__
+
+        
+if __name__ == '__main__':  # run directly; uncomment below for nose.suite debug logging
+    #import logging
+    #logging.basicConfig() #level=logging.DEBUG)
+    #logging.getLogger('nose.suite').setLevel(logging.DEBUG)
+    unittest.main()
diff --git a/functional_tests/test_loader.pyc b/functional_tests/test_loader.pyc
new file mode 100644 (file)
index 0000000..4c1394e
Binary files /dev/null and b/functional_tests/test_loader.pyc differ
diff --git a/functional_tests/test_multiprocessing/support/nameerror.py b/functional_tests/test_multiprocessing/support/nameerror.py
new file mode 100644 (file)
index 0000000..20e7bd7
--- /dev/null
@@ -0,0 +1,4 @@
+# we purposefully raise a NameError at the top level here
+
+undefined_variable  # raises NameError on import; exercised by TestMPNameError
+
diff --git a/functional_tests/test_multiprocessing/support/nameerror.pyc b/functional_tests/test_multiprocessing/support/nameerror.pyc
new file mode 100644 (file)
index 0000000..59fe392
Binary files /dev/null and b/functional_tests/test_multiprocessing/support/nameerror.pyc differ
diff --git a/functional_tests/test_multiprocessing/support/timeout.py b/functional_tests/test_multiprocessing/support/timeout.py
new file mode 100644 (file)
index 0000000..52dce12
--- /dev/null
@@ -0,0 +1,6 @@
+
+def test_timeout():
+    "this test *should* fail when process-timeout=1"
+    from time import sleep
+    sleep(2)  # longer than the 1s timeout set by TestMPTimeout; under the 3s of TestMPTimeoutPass
+
diff --git a/functional_tests/test_multiprocessing/test_nameerror$py.class b/functional_tests/test_multiprocessing/test_nameerror$py.class
new file mode 100644 (file)
index 0000000..d59dd84
Binary files /dev/null and b/functional_tests/test_multiprocessing/test_nameerror$py.class differ
diff --git a/functional_tests/test_multiprocessing/test_nameerror.py b/functional_tests/test_multiprocessing/test_nameerror.py
new file mode 100644 (file)
index 0000000..f73d02b
--- /dev/null
@@ -0,0 +1,31 @@
+import os
+import unittest
+
+from nose.plugins import PluginTester
+from nose.plugins.skip import SkipTest
+from nose.plugins.multiprocess import MultiProcess
+
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+
+def setup():  # module fixture: skip when multiprocessing is missing or the plugin is already active
+    try:
+        import multiprocessing
+        if 'active' in MultiProcess.status:
+            raise SkipTest("Multiprocess plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("multiprocessing module not available")
+
+
+class TestMPNameError(PluginTester, unittest.TestCase):  # a NameError at import time must surface through worker processes
+    activate = '--processes=2'
+    plugins = [MultiProcess()]
+    suitepath = os.path.join(support, 'nameerror.py')  # module that deliberately raises NameError
+
+    def runTest(self):  # the plugin's output must include the NameError details
+        print str(self.output)
+        assert 'NameError' in self.output
+        assert "'undefined_variable' is not defined" in self.output
+
diff --git a/functional_tests/test_multiprocessing/test_nameerror.pyc b/functional_tests/test_multiprocessing/test_nameerror.pyc
new file mode 100644 (file)
index 0000000..014f45f
Binary files /dev/null and b/functional_tests/test_multiprocessing/test_nameerror.pyc differ
diff --git a/functional_tests/test_multiprocessing/test_process_timeout$py.class b/functional_tests/test_multiprocessing/test_process_timeout$py.class
new file mode 100644 (file)
index 0000000..1fd8f08
Binary files /dev/null and b/functional_tests/test_multiprocessing/test_process_timeout$py.class differ
diff --git a/functional_tests/test_multiprocessing/test_process_timeout.py b/functional_tests/test_multiprocessing/test_process_timeout.py
new file mode 100644 (file)
index 0000000..535ecdb
--- /dev/null
@@ -0,0 +1,37 @@
+import os
+import unittest
+
+from nose.plugins import PluginTester
+from nose.plugins.skip import SkipTest
+from nose.plugins.multiprocess import MultiProcess
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+
+def setup():  # module fixture: skip when multiprocessing is missing or the plugin is already active
+    try:
+        import multiprocessing
+        if 'active' in MultiProcess.status:
+            raise SkipTest("Multiprocess plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("multiprocessing module not available")
+
+
+
+class TestMPTimeout(PluginTester, unittest.TestCase):  # a 2s test must be killed under --process-timeout=1
+    activate = '--processes=2'
+    args = ['--process-timeout=1']
+    plugins = [MultiProcess()]
+    suitepath = os.path.join(support, 'timeout.py')  # module containing test_timeout, which sleeps 2s
+
+    def runTest(self):  # output must report the timed-out test
+        assert "TimedOutException: 'timeout.test_timeout'" in self.output
+
+
+class TestMPTimeoutPass(TestMPTimeout):  # same suite passes once the timeout is raised above the 2s sleep
+    args = ['--process-timeout=3']
+
+    def runTest(self):  # no timeout reported and the run ends OK
+        assert "TimedOutException: 'timeout.test_timeout'" not in self.output
+        assert str(self.output).strip().endswith('OK')
diff --git a/functional_tests/test_multiprocessing/test_process_timeout.pyc b/functional_tests/test_multiprocessing/test_process_timeout.pyc
new file mode 100644 (file)
index 0000000..b961220
Binary files /dev/null and b/functional_tests/test_multiprocessing/test_process_timeout.pyc differ
diff --git a/functional_tests/test_namespace_pkg$py.class b/functional_tests/test_namespace_pkg$py.class
new file mode 100644 (file)
index 0000000..c0aeb57
Binary files /dev/null and b/functional_tests/test_namespace_pkg$py.class differ
diff --git a/functional_tests/test_namespace_pkg.py b/functional_tests/test_namespace_pkg.py
new file mode 100644 (file)
index 0000000..2db051e
--- /dev/null
@@ -0,0 +1,58 @@
+import os
+import sys
+import unittest
+from cStringIO import StringIO
+from nose.core import TestProgram
+from test_program import TestRunner
+
+here = os.path.dirname(__file__)
+support = os.path.join(here, 'support')
+
+class TestNamespacePackages(unittest.TestCase):  # namespace-package discovery with and without --traverse-namespace
+
+    def setUp(self):  # run from the namespace_pkg support dir with its site-packages on sys.path
+        self.cwd = os.getcwd()
+        self.orig_path = sys.path[:]
+        test_dir = os.path.join(support, 'namespace_pkg')
+        os.chdir(test_dir)
+        sys.path.append(os.path.join(test_dir, 'site-packages'))
+
+    def tearDown(self):  # restore sys.path and the working directory
+        sys.path = self.orig_path
+        os.chdir(self.cwd)
+
+    def test_namespace_pkg(self):
+        """Ensure namespace packages work/can import from each other"""
+        stream = StringIO()
+        runner = TestRunner(stream=stream)
+        runner.verbosity = 2
+        prog = TestProgram(argv=[''],
+                           testRunner=runner,
+                           exit=False)
+        res = runner.result
+        self.assertEqual(res.testsRun, 1,
+                         "Expected to run 1 test, ran %s" % res.testsRun)
+        assert res.wasSuccessful()
+        assert not res.errors
+        assert not res.failures
+
+    def test_traverse_namespace(self):
+        """Ensure the --traverse-namespace option tests the other
+        namespace package sibling also.
+        """
+        stream = StringIO()
+        runner = TestRunner(stream=stream)
+        runner.verbosity = 2
+        prog = TestProgram(argv=['', '--traverse-namespace'],
+                           testRunner=runner,
+                           exit=False)
+        res = runner.result
+        self.assertEqual(res.testsRun, 2,
+                         "Expected to run 2 tests, ran %s" % res.testsRun)
+        assert res.wasSuccessful()
+        assert not res.errors
+        assert not res.failures
+
+
+if __name__ == '__main__':  # allow running this module directly
+    unittest.main()
diff --git a/functional_tests/test_namespace_pkg.pyc b/functional_tests/test_namespace_pkg.pyc
new file mode 100644 (file)
index 0000000..eee2921
Binary files /dev/null and b/functional_tests/test_namespace_pkg.pyc differ
diff --git a/functional_tests/test_plugin_api$py.class b/functional_tests/test_plugin_api$py.class
new file mode 100644 (file)
index 0000000..6c30a0f
Binary files /dev/null and b/functional_tests/test_plugin_api$py.class differ
diff --git a/functional_tests/test_plugin_api.py b/functional_tests/test_plugin_api.py
new file mode 100644 (file)
index 0000000..c508ded
--- /dev/null
@@ -0,0 +1,45 @@
+"""
+Functional tests of plugin apis -- individual plugintester runs for
+test plugins that implement one or more hooks for testing.
+"""
+import os
+import sys
+import unittest
+from nose.plugins import Plugin, PluginTester
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class AllFail(Plugin):  # prepareTestCase hook that replaces every test's execution with a forced failure
+    def prepareTestCase(self, test):  # returning a callable overrides how the test is run
+        self.test = test
+        return self.fail
+
+    def fail(self, result):  # record a failure for the captured test with start/stop bookkeeping
+        result.startTest(self.test)
+        try:
+            try:
+                assert False, "I want to fail!"
+            except:
+                result.addFailure(self.test, sys.exc_info())
+        finally:
+            result.stopTest(self.test)
+
+class TestPrepareTestCase_MakeAllFail(PluginTester, unittest.TestCase):  # every test in package2 should FAIL under AllFail
+    activate = '--with-allfail'
+    args = ['-v']
+    plugins = [AllFail()]
+    suitepath = os.path.join(support, 'package2')
+    
+    def runTest(self):  # each reported test line must end FAIL; the run must not finish OK
+        print "x" * 70
+        print str(self.output)
+        print "x" * 70
+        for line in self.output:
+            if line.startswith('test_pak'):
+                assert line.strip().endswith('FAIL'), \
+                       "Expected failure but got: %s" % line.strip()
+        assert not str(self.output).strip().endswith('OK')
+
+
+if __name__ == '__main__':  # allow running this module directly
+    unittest.main()
diff --git a/functional_tests/test_plugin_api.pyc b/functional_tests/test_plugin_api.pyc
new file mode 100644 (file)
index 0000000..e301c02
Binary files /dev/null and b/functional_tests/test_plugin_api.pyc differ
diff --git a/functional_tests/test_plugins$py.class b/functional_tests/test_plugins$py.class
new file mode 100644 (file)
index 0000000..9e5aa9c
Binary files /dev/null and b/functional_tests/test_plugins$py.class differ
diff --git a/functional_tests/test_plugins.py b/functional_tests/test_plugins.py
new file mode 100644 (file)
index 0000000..eff6c7a
--- /dev/null
@@ -0,0 +1,71 @@
+import os
+import sys
+import unittest
+from nose.config import Config
+from nose.core import TestProgram
+
+here = os.path.abspath(os.path.dirname(__file__))
+support = os.path.join(here, 'support')
+units = os.path.normpath(os.path.join(here, '..', 'unit_tests'))
+
+if units not in sys.path:
+    sys.path.insert(0, units)
+from mock import RecordingPluginManager
+
+
+class TestPluginCalls(unittest.TestCase):
+    """
+    Tests how plugins are called throughout a standard test run
+    """
+    def test_plugin_calls_package1(self):
+        wdir = os.path.join(support, 'package1')
+        man = RecordingPluginManager()
+        conf = Config(plugins=man, stream=sys.stdout)
+        t = TestProgram(defaultTest=wdir, config=conf,
+                        argv=['test_plugin_calls_package1'], exit=False)
+        print man.calls()
+        assert man.called
+
+        self.assertEqual(
+            man.calls(),
+            ['loadPlugins', 'addOptions', 'configure', 'begin',
+             'prepareTestLoader', 'loadTestsFromNames', 'loadTestsFromName',
+             'prepareTestRunner', 'prepareTest', 'setOutputStream',
+             'prepareTestResult', 'beforeDirectory', 'wantFile',
+             'wantDirectory', 'beforeContext', 'beforeImport',
+             'afterImport', 'wantModule', 'wantClass', 'wantFunction',
+             'makeTest', 'wantMethod', 'loadTestsFromTestClass',
+             'loadTestsFromTestCase', 'loadTestsFromModule', 'startContext',
+             'beforeTest', 'prepareTestCase', 'startTest', 'addSuccess',
+             'stopTest', 'afterTest', 'stopContext', 'afterContext',
+             'loadTestsFromDir', 'afterDirectory',
+             'report', 'finalize'])
+
+    def test_plugin_calls_package1_verbose(self):
+        wdir = os.path.join(support, 'package1')
+        man = RecordingPluginManager()
+        conf = Config(plugins=man, stream=sys.stdout)
+        t = TestProgram(defaultTest=wdir, config=conf,
+                        argv=['test_plugin_calls_package1', '-v'], exit=False)
+        print man.calls()
+        assert man.called
+
+        self.assertEqual(
+            man.calls(),
+            ['loadPlugins', 'addOptions', 'configure', 'begin',
+             'prepareTestLoader', 'loadTestsFromNames', 'loadTestsFromName',
+             'prepareTestRunner', 'prepareTest', 'setOutputStream',
+             'prepareTestResult', 'beforeDirectory', 'wantFile',
+             'wantDirectory', 'beforeContext', 'beforeImport',
+             'afterImport', 'wantModule', 'wantClass', 'wantFunction',
+             'makeTest', 'wantMethod', 'loadTestsFromTestClass',
+             'loadTestsFromTestCase', 'loadTestsFromModule', 'startContext',
+             'beforeTest', 'prepareTestCase', 'startTest', 'describeTest',
+             'testName', 'addSuccess', 'stopTest', 'afterTest', 'stopContext',
+             'afterContext', 'loadTestsFromDir', 'afterDirectory',
+             'report', 'finalize'])
+
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_plugins.pyc b/functional_tests/test_plugins.pyc
new file mode 100644 (file)
index 0000000..7db3064
Binary files /dev/null and b/functional_tests/test_plugins.pyc differ
diff --git a/functional_tests/test_plugintest$py.class b/functional_tests/test_plugintest$py.class
new file mode 100644 (file)
index 0000000..2393f42
Binary files /dev/null and b/functional_tests/test_plugintest$py.class differ
diff --git a/functional_tests/test_plugintest.py b/functional_tests/test_plugintest.py
new file mode 100644 (file)
index 0000000..7d1a65b
--- /dev/null
@@ -0,0 +1,51 @@
+
+import unittest, os
+from nose.plugins import PluginTester, Plugin
+from nose.tools import eq_
+from cStringIO import StringIO
+
+class StubPlugin(Plugin):
+    # Minimal concrete Plugin: delegates option registration to the base
+    # class and makes configure a no-op, so subclasses in the tests below
+    # can override just the hooks they care about.
+    def options(self, parser, env=os.environ):
+        super(StubPlugin, self).options(parser, env=env)
+    def configure(self, options, conf):
+        pass    
+
+class SomePluginTestCase(PluginTester):
+    activate = None # set this to --with-yourplugin, etc
+    plugins = [] # list of plugin instances
+    
+    def makeSuite(self):
+        # Suite under test: a single test that always raises, giving the
+        # plugin under test something to react to.
+        class SomeTest(unittest.TestCase):
+            def runTest(self):
+                raise ValueError("Now do something, plugin!")
+        return unittest.TestSuite([SomeTest()])      
+
+class TestPluginTester(unittest.TestCase):
+    # Functional check that PluginTester actually configures the plugins
+    # listed on the test case when the case is run.
+    def _runPluginTest(self, test_case):
+        # Run test_case under a plain unittest loader and return the
+        # TestResult for inspection.
+        loader = unittest.TestLoader()
+        suite = loader.loadTestsFromTestCase(test_case)
+        res = unittest.TestResult()
+        suite(res)
+        return res
+        
+    def testPluginTesterExecsPlugin(self):
+        called = []
+        class MockExecPlugin(StubPlugin):
+            def configure(self, options, conf):
+                called.append('configure')
+        
+        class MockExecTestCase(SomePluginTestCase, unittest.TestCase):
+            activate = '--with-mockexec'
+            plugins = [MockExecPlugin()]
+            
+            def test_something_anything(self):
+                # here is where the test case would test
+                # that the plugin interacted with stub tests
+                pass      
+            
+        res = self._runPluginTest(MockExecTestCase)
+        eq_(res.testsRun, 1)
+        eq_(called[0], 'configure')
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/functional_tests/test_plugintest.pyc b/functional_tests/test_plugintest.pyc
new file mode 100644 (file)
index 0000000..b20a39b
Binary files /dev/null and b/functional_tests/test_plugintest.pyc differ
diff --git a/functional_tests/test_program$py.class b/functional_tests/test_program$py.class
new file mode 100644 (file)
index 0000000..7c24d97
Binary files /dev/null and b/functional_tests/test_program$py.class differ
diff --git a/functional_tests/test_program.py b/functional_tests/test_program.py
new file mode 100644 (file)
index 0000000..bb63818
--- /dev/null
@@ -0,0 +1,189 @@
+import os
+import unittest
+from cStringIO import StringIO
+from nose import SkipTest
+from nose.core import TestProgram
+from nose.config import Config
+from nose.plugins.manager import DefaultPluginManager
+from nose.result import _TextTestResult
+
+here = os.path.dirname(__file__)
+support = os.path.join(here, 'support')
+
+class TestRunner(unittest.TextTestRunner):
+    # TextTestRunner that keeps a reference to the result object so the
+    # tests below can inspect it after TestProgram has finished.
+    def _makeResult(self):
+        self.result = _TextTestResult(
+            self.stream, self.descriptions, self.verbosity)
+        return self.result 
+
+# Note that all of these tests use a set config to avoid the loading
+# of plugins or settings from .noserc.
+
+class TestTestProgram(unittest.TestCase):
+
+    def test_run_support_ctx(self):
+        """Collect and run tests in functional_tests/support/ctx
+
+        This should collect no tests in the default configuration, since
+        none of the modules have test-like names.
+        """
+        stream = StringIO()
+        runner = TestRunner(stream=stream)
+        prog = TestProgram(defaultTest=os.path.join(support, 'ctx'),
+                           argv=['test_run_support_ctx'],
+                           testRunner=runner,
+                           config=Config(),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 0,
+                         "Expected to run 0 tests, ran %s" % res.testsRun)
+        assert res.wasSuccessful()
+        assert not res.errors
+        assert not res.failures
+
+    def test_run_support_package2(self):
+        """Collect and run tests in functional_tests/support/package2
+
+        This should collect and run 5 tests.
+        """
+        stream = StringIO()
+        runner = TestRunner(stream=stream)
+        prog = TestProgram(defaultTest=os.path.join(support, 'package2'),
+                           argv=['test_run_support_package2', '-v'],
+                           testRunner=runner,
+                           config=Config(),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 5,
+                         "Expected to run 5 tests, ran %s" % res.testsRun)
+        assert res.wasSuccessful()
+        assert not res.errors
+        assert not res.failures
+        
+    def test_run_support_package3(self):
+        """Collect and run tests in functional_tests/support/package3
+
+        This should collect and run 2 tests. The package layout is:
+
+        lib/
+          a.py
+        src/
+          b.py
+        tests/
+          test_a.py
+          test_b.py
+        """
+        stream = StringIO()
+        runner = TestRunner(stream=stream)
+
+        prog = TestProgram(defaultTest=os.path.join(support, 'package3'),
+                           argv=['test_run_support_package3', '-v'],
+                           testRunner=runner,
+                           config=Config(),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 2,
+                         "Expected to run 2 tests, ran %s" % res.testsRun)
+        assert res.wasSuccessful()
+        assert not res.errors
+        assert not res.failures
+
+    def test_run_support_twist(self):
+        """Collect and run tests in functional_tests/support/twist
+
+        This should collect and run 4 tests with 2 fails and an error.
+        """
+        try:
+            from twisted.trial.unittest import TestCase
+        except ImportError:
+            raise SkipTest('twisted not available; skipping')
+        stream = StringIO()
+        runner = TestRunner(stream=stream, verbosity=2)
+
+        prog = TestProgram(defaultTest=os.path.join(support, 'twist'),
+                           argv=['test_run_support_twist'],
+                           testRunner=runner,
+                           config=Config(stream=stream),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+
+        # some versions of twisted.trial.unittest.TestCase have
+        # runTest in the base class -- this is wrong! But we have
+        # to deal with it
+        if hasattr(TestCase, 'runTest'):
+            expect = 5
+        else:
+            expect = 4
+        self.assertEqual(res.testsRun, expect,
+                         "Expected to run %s tests, ran %s" %
+                         (expect, res.testsRun))
+        assert not res.wasSuccessful()
+        assert len(res.errors) == 1
+        assert len(res.failures) == 2
+
+    def test_issue_130(self):
+        """Collect and run tests in support/issue130 without error.
+
+        This tests that the result and error classes can handle string
+        exceptions.
+        """
+        import warnings
+        warnings.filterwarnings('ignore', category=DeprecationWarning,
+                                module='test')
+        
+        stream = StringIO()
+        runner = TestRunner(stream=stream, verbosity=2)
+
+        prog = TestProgram(defaultTest=os.path.join(support, 'issue130'),
+                           argv=['test_issue_130'],
+                           testRunner=runner,
+                           config=Config(stream=stream,
+                                         plugins=DefaultPluginManager()),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 0) # error is in setup
+        assert not res.wasSuccessful()
+        assert res.errors
+        assert not res.failures
+
+    def test_defaultTest_list(self):
+        """defaultTest may be a list of paths; all are collected (5 + 2)."""
+        stream = StringIO()
+        runner = TestRunner(stream=stream, verbosity=2)
+        tests = [os.path.join(support, 'package2'),
+                 os.path.join(support, 'package3')]
+        prog = TestProgram(defaultTest=tests,
+                           argv=['test_run_support_package2_3', '-v'],
+                           testRunner=runner,
+                           config=Config(),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 7)
+
+    def test_illegal_packages_not_selected(self):
+        """Collect support/issue143: nothing should be selected, and the
+        run should succeed with no errors or failures.
+        """
+        stream = StringIO()
+        runner = TestRunner(stream=stream, verbosity=2)
+
+        prog = TestProgram(defaultTest=os.path.join(support, 'issue143'),
+                           argv=['test_issue_143'],
+                           testRunner=runner,
+                           config=Config(stream=stream,
+                                         plugins=DefaultPluginManager()),
+                           exit=False)
+        res = runner.result
+        print stream.getvalue()
+        self.assertEqual(res.testsRun, 0)
+        assert res.wasSuccessful()
+        assert not res.errors
+        assert not res.failures
+        
+
+if __name__ == '__main__':
+    #import logging
+    #logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/functional_tests/test_program.pyc b/functional_tests/test_program.pyc
new file mode 100644 (file)
index 0000000..c7ee92f
Binary files /dev/null and b/functional_tests/test_program.pyc differ
diff --git a/functional_tests/test_result$py.class b/functional_tests/test_result$py.class
new file mode 100644 (file)
index 0000000..a8334dc
Binary files /dev/null and b/functional_tests/test_result$py.class differ
diff --git a/functional_tests/test_result.py b/functional_tests/test_result.py
new file mode 100644 (file)
index 0000000..7206020
--- /dev/null
@@ -0,0 +1,32 @@
+import os
+import sys
+import unittest
+from cStringIO import StringIO
+from nose.config import Config
+from nose.core import TestProgram
+from nose.plugins.manager import PluginManager
+
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class TestResultSummary(unittest.TestCase):
+
+    def test_with_todo_plugin(self):
+        # The TodoPlugin lives in support/todo; run nose with it enabled
+        # and check that its count appears in the summary line.
+        pkpath = os.path.join(support, 'todo')
+        sys.path.insert(0, pkpath)
+        from todoplug import TodoPlugin
+
+        stream = StringIO()
+        config = Config(stream=stream,
+                        plugins=PluginManager([TodoPlugin()]))
+        
+        TestProgram(argv=['t', '--with-todo', pkpath],
+                    config=config, exit=False)
+        out = stream.getvalue()
+        print out
+        self.assert_('FAILED (TODO=1)' in out)
+
+
+if __name__ == '__main__':
+    unittest.main()
+        
diff --git a/functional_tests/test_result.pyc b/functional_tests/test_result.pyc
new file mode 100644 (file)
index 0000000..2595f1b
Binary files /dev/null and b/functional_tests/test_result.pyc differ
diff --git a/functional_tests/test_selector$py.class b/functional_tests/test_selector$py.class
new file mode 100644 (file)
index 0000000..8446a8b
Binary files /dev/null and b/functional_tests/test_selector$py.class differ
diff --git a/functional_tests/test_selector.py b/functional_tests/test_selector.py
new file mode 100644 (file)
index 0000000..6282ac4
--- /dev/null
@@ -0,0 +1,17 @@
+import os
+import unittest
+from nose.selector import Selector, TestAddress
+
+support = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
+
+class TestTestAddress(unittest.TestCase):
+
+    def test_module_filename(self):
+        # A dotted test name should resolve to the module's source file
+        # under the given working directory.
+        wd = os.path.join(support, 'package2')
+        addr = TestAddress('test_pak.test_mod', workingDir=wd)
+        self.assertEqual(addr.filename,
+                         os.path.join(wd, 'test_pak', 'test_mod.py'))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_selector.pyc b/functional_tests/test_selector.pyc
new file mode 100644 (file)
index 0000000..b67dba2
Binary files /dev/null and b/functional_tests/test_selector.pyc differ
diff --git a/functional_tests/test_skip_pdb_interaction$py.class b/functional_tests/test_skip_pdb_interaction$py.class
new file mode 100644 (file)
index 0000000..2827fa4
Binary files /dev/null and b/functional_tests/test_skip_pdb_interaction$py.class differ
diff --git a/functional_tests/test_skip_pdb_interaction.py b/functional_tests/test_skip_pdb_interaction.py
new file mode 100644 (file)
index 0000000..20c5f78
--- /dev/null
@@ -0,0 +1,49 @@
+import unittest
+from nose import case
+from nose.config import Config
+from nose.plugins import debug
+from nose.plugins.manager import PluginManager
+from nose.plugins.skip import Skip, SkipTest
+from nose.proxy import ResultProxyFactory
+
+
+class StubPdb:
+    # Stands in for the pdb module; records whether post_mortem was called.
+    called = False
+    def post_mortem(self, tb):
+        self.called = True
+
+class TestSkipPdbInteraction(unittest.TestCase):
+    """Tests interaction between skip plugin and pdb plugin -- pdb should
+    not fire on a skip error
+    """
+    def setUp(self):
+        # swap the pdb module used by the debug plugin for a stub
+        self._pdb = debug.pdb
+        debug.pdb = StubPdb()
+
+    def tearDown(self):
+        debug.pdb = self._pdb
+    
+    def test_skip_prevents_pdb_call(self):
+        # A test raising SkipTest must not be recorded as an error, and
+        # must not trigger post_mortem even with errors enabled for pdb.
+
+        class TC(unittest.TestCase):
+            def test(self):
+                raise SkipTest('not me')
+
+        skip = Skip()
+        skip.enabled = True
+        p = debug.Pdb()
+        p.enabled = True
+        p.enabled_for_errors = True
+        res = unittest.TestResult()
+        conf = Config(plugins=PluginManager(plugins=[skip, p]))        
+        rpf = ResultProxyFactory(conf)
+        test = case.Test(TC('test'), resultProxy=rpf)
+        test(res)
+
+        assert not res.errors, "Skip was recorded as error %s" % res.errors
+        assert not debug.pdb.called, "pdb was called"
+
+        
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_skip_pdb_interaction.pyc b/functional_tests/test_skip_pdb_interaction.pyc
new file mode 100644 (file)
index 0000000..9694c0c
Binary files /dev/null and b/functional_tests/test_skip_pdb_interaction.pyc differ
diff --git a/functional_tests/test_success$py.class b/functional_tests/test_success$py.class
new file mode 100644 (file)
index 0000000..6a07a87
Binary files /dev/null and b/functional_tests/test_success$py.class differ
diff --git a/functional_tests/test_success.py b/functional_tests/test_success.py
new file mode 100644 (file)
index 0000000..760a7d1
--- /dev/null
@@ -0,0 +1,43 @@
+import os
+import unittest
+from nose.plugins.plugintest import PluginTester, remove_timings
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+
+class TestSingleTestPass(PluginTester, unittest.TestCase):
+    # Output for a suite with one passing test must match exactly,
+    # including the final terminating newline.
+    activate = '-v'
+    plugins = []
+    suitepath = os.path.join(support, 'pass')
+
+    def test_single_test_pass(self):
+        # note that this doesn't use nose.plugins.doctests.run, in order that
+        # this test fails if the final terminating newline is not present (it
+        # could still be written as a doctest -- PluginTester was just closer
+        # to hand)
+        print self.output
+        output = remove_timings(str(self.output))
+        assert output == """\
+test.test ... ok
+
+----------------------------------------------------------------------
+Ran 1 test in ...s
+
+OK
+"""
+
+class TestZeroTestsPass(PluginTester, unittest.TestCase):
+    # An empty suite still reports "Ran 0 tests" and OK, with the exact
+    # expected formatting.
+    activate = '-v'
+    plugins = []
+    suitepath = os.path.join(support, 'empty')
+
+    def test_zero_tests_pass(self):
+        print self.output
+        output = remove_timings(str(self.output))
+        assert output == """\
+
+----------------------------------------------------------------------
+Ran 0 tests in ...s
+
+OK
+"""
diff --git a/functional_tests/test_success.pyc b/functional_tests/test_success.pyc
new file mode 100644 (file)
index 0000000..9ed5392
Binary files /dev/null and b/functional_tests/test_success.pyc differ
diff --git a/functional_tests/test_suite$py.class b/functional_tests/test_suite$py.class
new file mode 100644 (file)
index 0000000..4a30880
Binary files /dev/null and b/functional_tests/test_suite$py.class differ
diff --git a/functional_tests/test_suite.py b/functional_tests/test_suite.py
new file mode 100644 (file)
index 0000000..a411469
--- /dev/null
@@ -0,0 +1,47 @@
+import os
+import sys
+import unittest
+from nose import case
+from nose.suite import ContextSuiteFactory
+
+support = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
+
+class TestContextSuiteFactory(unittest.TestCase):
+
+    def setUp(self):
+        # snapshot sys.modules and sys.path so imports from
+        # support/package2 can be undone in tearDown
+        self._mods = sys.modules.copy()
+        self._path = sys.path[:]
+        sys.path.insert(0, os.path.join(support, 'package2'))
+
+    def tearDown(self):
+        to_del = [ m for m in sys.modules.keys() if
+                   m not in self._mods ]
+        if to_del:
+            for mod in to_del:
+                del sys.modules[mod]
+        sys.modules.update(self._mods)
+        sys.path = self._path
+
+    def test_find_context(self):
+        # the factory picks the tests' common module as the suite context
+        from test_pak import test_mod
+        
+        factory = ContextSuiteFactory()
+        tests = [case.FunctionTestCase(test_mod.test_add),
+                 case.FunctionTestCase(test_mod.test_minus)]
+        suite = factory(tests)
+        self.assertEqual(suite.context, test_mod)
+
+    def test_ancestry(self):
+        # ancestry yields the enclosing module, then each enclosing
+        # package, innermost first
+        from test_pak.test_sub.test_mod import TestMaths
+        from test_pak.test_sub import test_mod
+        from test_pak import test_sub
+        import test_pak
+        
+        factory = ContextSuiteFactory()
+        ancestry = [l for l in factory.ancestry(TestMaths)]
+        self.assertEqual(ancestry,
+                         [test_mod, test_sub, test_pak])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/functional_tests/test_suite.pyc b/functional_tests/test_suite.pyc
new file mode 100644 (file)
index 0000000..00af036
Binary files /dev/null and b/functional_tests/test_suite.pyc differ
diff --git a/functional_tests/test_withid_failures.rst b/functional_tests/test_withid_failures.rst
new file mode 100644 (file)
index 0000000..5a371b7
--- /dev/null
@@ -0,0 +1,50 @@
+    >>> import os
+    >>> import sys
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> from nose.plugins.testid import TestId
+    >>> import tempfile
+    >>> idfile = tempfile.mktemp()
+    >>> support = os.path.join(os.path.dirname(__file__), 'support', 'id_fails')
+    >>> argv = [__file__, '-v', '--with-id', '--id-file', idfile, support]
+    >>> run(argv=argv, plugins=[TestId()])
+    #1 Failure: ImportError (No module named apackagethatdoesntexist) ... ERROR
+    #2 test_b.test ... ok
+    #3 test_b.test_fail ... FAIL
+    <BLANKLINE>
+    ======================================================================
+    ERROR: Failure: ImportError (No module named apackagethatdoesntexist)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    ImportError: No module named apackagethatdoesntexist
+    <BLANKLINE>
+    ======================================================================
+    FAIL: test_b.test_fail
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AssertionError
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 3 tests in ...s
+    <BLANKLINE>
+    FAILED (errors=1, failures=1)
+
+Addressing failures works (sometimes).
+
+    >>> argv.append('1')
+    >>> _junk = sys.modules.pop('test_a', None) # required on Python 2.3
+    >>> run(argv=argv, plugins=[TestId()])
+    #1 Failure: ImportError (No module named apackagethatdoesntexist) ... ERROR
+    <BLANKLINE>
+    ======================================================================
+    ERROR: Failure: ImportError (No module named apackagethatdoesntexist)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    ImportError: No module named apackagethatdoesntexist
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (errors=1)
diff --git a/functional_tests/test_xunit$py.class b/functional_tests/test_xunit$py.class
new file mode 100644 (file)
index 0000000..b699ed2
Binary files /dev/null and b/functional_tests/test_xunit$py.class differ
diff --git a/functional_tests/test_xunit.py b/functional_tests/test_xunit.py
new file mode 100644 (file)
index 0000000..5d21a17
--- /dev/null
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+import codecs
+import os
+import sys
+import unittest
+from nose.plugins.xunit import Xunit
+from nose.plugins.skip import Skip
+from nose.plugins import PluginTester
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+xml_results_filename = os.path.join(support, "xunit.xml")
+
+# the plugin is tested better in unit tests.
+# this is just here for a sanity check
+    
+class TestXUnitPlugin(PluginTester, unittest.TestCase):
+    activate = '--with-xunit'
+    args = ['-v','--xunit-file=%s' % xml_results_filename]
+    plugins = [Xunit(), Skip()]
+    suitepath = os.path.join(support, 'xunit')
+    
+    def runTest(self):
+        print str(self.output)
+        
+        assert "ERROR: test_error" in self.output
+        assert "FAIL: test_fail" in self.output
+        assert "test_skip (test_xunit_as_suite.TestForXunit) ... SKIP: skipit" in self.output
+        assert "XML: %s" % xml_results_filename in self.output
+        
+        # the report file is UTF-8; read it back decoded so the
+        # non-ASCII error message can be checked below
+        f = codecs.open(xml_results_filename,'r', encoding='utf8')
+        result = f.read()
+        f.close()
+        print result.encode('utf8', 'replace')
+        
+        assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+        assert '<testsuite name="nosetests" tests="6" errors="2" failures="1" skip="1">' in result
+        assert '<testcase classname="test_xunit_as_suite.TestForXunit" name="test_error" time="' in result
+        # TODO(Kumar) think of better x-platform code here that
+        # does not confuse 2to3
+        if sys.version_info[0:2] >= (3,0):
+            assert ('<error type="%s.Exception" message="日本">' % (Exception.__module__,)) in result
+        else:
+            assert ('<error type="%s.Exception" message="日本">' % (Exception.__module__,)).decode('utf8') in result
+        assert '</testcase>' in result
+        assert '</testsuite>' in result
+
+
+class TestIssue279(PluginTester, unittest.TestCase):
+    # Regression test for nose issue 279: run the suite in
+    # support/issue279 and check the raised exception is reported as an
+    # error in the xunit XML.
+    activate = '--with-xunit'
+    args = ['-v','--xunit-file=%s' % xml_results_filename]
+    plugins = [Xunit(), Skip()]
+    suitepath = os.path.join(support, 'issue279')
+
+    def runTest(self):
+        print str(self.output)
+        f = open(xml_results_filename,'r')
+        result = f.read()
+        f.close()
+        print result
+        assert 'tests="1" errors="1" failures="0" skip="0"' in result
+        assert "Exception: I would prefer not to" in result
diff --git a/functional_tests/test_xunit.pyc b/functional_tests/test_xunit.pyc
new file mode 100644 (file)
index 0000000..9d0f167
Binary files /dev/null and b/functional_tests/test_xunit.pyc differ
diff --git a/install-rpm.sh b/install-rpm.sh
new file mode 100755 (executable)
index 0000000..68f7f01
--- /dev/null
@@ -0,0 +1,3 @@
+python setup.py install --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES
+# 'brp-compress' gzips the man pages without distutils knowing... fix this
+sed -i -e 's@man/man\([[:digit:]]\)/\(.\+\.[[:digit:]]\)$@man/man\1/\2.gz@g' INSTALLED_FILES
diff --git a/lgpl.txt b/lgpl.txt
new file mode 100644 (file)
index 0000000..8add30a
--- /dev/null
+++ b/lgpl.txt
@@ -0,0 +1,504 @@
+                 GNU LESSER GENERAL PUBLIC LICENSE
+                      Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+                           Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+\f
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+\f
+                 GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+\f
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+\f
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+\f
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+\f
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+\f
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+\f
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+                           NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+                    END OF TERMS AND CONDITIONS
+\f
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/nose/__init__.py b/nose/__init__.py
new file mode 100644 (file)
index 0000000..1ebf76d
--- /dev/null
@@ -0,0 +1,15 @@
+from nose.core import collector, main, run, run_exit, runmodule
+# backwards compatibility
+from nose.exc import SkipTest, DeprecatedTest
+from nose.tools import with_setup
+
+__author__ = 'Jason Pellerin'
+# Version kept as a tuple of ints so components can be compared
+# numerically; __version__ is the dotted-string form derived from it.
+__versioninfo__ = (1, 1, 2)
+__version__ = '.'.join(map(str, __versioninfo__))
+
+# Names re-exported as the package-level public API.
+__all__ = [
+    'main', 'run', 'run_exit', 'runmodule', 'with_setup',
+    'SkipTest', 'DeprecatedTest', 'collector'
+    ]
+
+
diff --git a/nose/case.py b/nose/case.py
new file mode 100644 (file)
index 0000000..cffa4ab
--- /dev/null
@@ -0,0 +1,397 @@
+"""nose unittest.TestCase subclasses. It is not necessary to subclass these
+classes when writing tests; they are used internally by nose.loader.TestLoader
+to create test cases from test functions and methods in test classes.
+"""
+import logging
+import sys
+import unittest
+from inspect import isfunction
+from nose.config import Config
+from nose.failure import Failure # for backwards compatibility
+from nose.util import resolve_name, test_address, try_run
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Test']
+
+
+class Test(unittest.TestCase):
+    """The universal test case wrapper.
+
+    When a plugin sees a test, it will always see an instance of this
+    class. To access the actual test case that will be run, access the
+    test property of the nose.case.Test instance.
+    """
+    __test__ = False # do not collect
+    def __init__(self, test, config=None, resultProxy=None):
+        # sanity check
+        if not callable(test):
+            raise TypeError("nose.case.Test called with argument %r that "
+                            "is not callable. A callable is required."
+                            % test)
+        self.test = test
+        if config is None:
+            config = Config()
+        self.config = config
+        self.tbinfo = None
+        self.capturedOutput = None
+        self.resultProxy = resultProxy
+        self.plugins = config.plugins
+        self.passed = None
+        unittest.TestCase.__init__(self)
+
+    def __call__(self, *arg, **kwarg):
+        """Calling the wrapper is equivalent to calling run().
+        """
+        return self.run(*arg, **kwarg)
+
+    def __str__(self):
+        # plugins get the first chance to name the test
+        name = self.plugins.testName(self)
+        if name is not None:
+            return name
+        return str(self.test)
+
+    def __repr__(self):
+        return "Test(%r)" % self.test
+
+    def afterTest(self, result):
+        """Called after test is complete (after result.stopTest)
+        """
+        # the result may not implement this plugin hook; skip silently
+        try:
+            afterTest = result.afterTest
+        except AttributeError:
+            pass
+        else:
+            afterTest(self.test)
+
+    def beforeTest(self, result):
+        """Called before test is run (before result.startTest)
+        """
+        # the result may not implement this plugin hook; skip silently
+        try:
+            beforeTest = result.beforeTest
+        except AttributeError:
+            pass
+        else:
+            beforeTest(self.test)
+
+    def exc_info(self):
+        """Extract exception info.
+        """
+        exc, exv, tb = sys.exc_info()
+        return (exc, exv, tb)
+
+    def id(self):
+        """Get a short(er) description of the test
+        """
+        return self.test.id()
+
+    def address(self):
+        """Return a round-trip name for this test, a name that can be
+        fed back as input to loadTestByName and (assuming the same
+        plugin configuration) result in the loading of this test.
+        """
+        if hasattr(self.test, 'address'):
+            return self.test.address()
+        else:
+            # not a nose case
+            return test_address(self.test)
+
+    def _context(self):
+        # resolution order: explicit context attribute, then the test's
+        # class, then its module; None if nothing can be determined
+        try:
+            return self.test.context
+        except AttributeError:
+            pass
+        try:
+            return self.test.__class__
+        except AttributeError:
+            pass
+        try:
+            return resolve_name(self.test.__module__)
+        except AttributeError:
+            pass
+        return None
+    context = property(_context, None, None,
+                      """Get the context object of this test (if any).""")
+
+    def run(self, result):
+        """Modified run for the test wrapper.
+
+        From here we don't call result.startTest or stopTest or
+        addSuccess.  The wrapper calls addError/addFailure only if its
+        own setup or teardown fails, or running the wrapped test fails
+        (eg, if the wrapped "test" is not callable).
+
+        Two additional methods are called, beforeTest and
+        afterTest. These give plugins a chance to modify the wrapped
+        test before it is called and do cleanup after it is
+        called. They are called unconditionally.
+        """
+        if self.resultProxy:
+            result = self.resultProxy(result, self)
+        try:
+            try:
+                self.beforeTest(result)
+                self.runTest(result)
+            except KeyboardInterrupt:
+                raise
+            except:
+                # errors raised by the wrapper machinery itself (the
+                # wrapped test reports its own) are recorded here
+                err = sys.exc_info()
+                result.addError(self, err)
+        finally:
+            self.afterTest(result)
+
+    def runTest(self, result):
+        """Run the test. Plugins may alter the test by returning a
+        value from prepareTestCase. The value must be callable and
+        must accept one argument, the result instance.
+        """
+        test = self.test
+        plug_test = self.config.plugins.prepareTestCase(self)
+        if plug_test is not None:
+            test = plug_test
+        test(result)
+
+    def shortDescription(self):
+        # plugins get the first chance to describe the test
+        desc = self.plugins.describeTest(self)
+        if desc is not None:
+            return desc
+        # work around bug in unittest.TestCase.shortDescription
+        # with multiline docstrings.
+        test = self.test
+        try:
+            test._testMethodDoc = test._testMethodDoc.strip()# 2.5
+        except AttributeError:
+            try:
+                # 2.4 and earlier
+                test._TestCase__testMethodDoc = \
+                    test._TestCase__testMethodDoc.strip()
+            except AttributeError:
+                pass
+        # 2.7 compat: shortDescription() always returns something
+        # which is a change from 2.6 and below, and breaks the
+        # testName plugin call.
+        try:
+            desc = self.test.shortDescription()
+        except Exception:
+            # this is probably caused by a problem in test.__str__() and is
+            # only triggered by python 3.1's unittest!
+            pass
+        try:
+            if desc == str(self.test):
+                return
+        except Exception:
+            # If str() triggers an exception then ignore it.
+            # see issue 422
+            pass
+        return desc
+
+
+class TestBase(unittest.TestCase):
+    """Common functionality for FunctionTestCase and MethodTestCase.
+    """
+    __test__ = False # do not collect
+
+    def id(self):
+        # the full string representation serves as the test id
+        return str(self)
+
+    def runTest(self):
+        # call the wrapped test callable with any generator-supplied args
+        self.test(*self.arg)
+
+    def shortDescription(self):
+        # an explicit 'description' attribute on the test wins
+        if hasattr(self.test, 'description'):
+            return self.test.description
+        func, arg = self._descriptors()
+        doc = getattr(func, '__doc__', None)
+        if not doc:
+            doc = str(self)
+        # use only the first non-blank line of the docstring (or name)
+        return doc.strip().split("\n")[0].strip()
+
+
+class FunctionTestCase(TestBase):
+    """TestCase wrapper for test functions.
+
+    Don't use this class directly; it is used internally in nose to
+    create test cases for test functions.
+    """
+    __test__ = False # do not collect
+
+    def __init__(self, test, setUp=None, tearDown=None, arg=tuple(),
+                 descriptor=None):
+        """Initialize the FunctionTestCase.
+
+        Required argument:
+
+        * test -- the test function to call.
+
+        Optional arguments:
+
+        * setUp -- function to run at setup.
+
+        * tearDown -- function to run at teardown.
+
+        * arg -- arguments to pass to the test function. This is to support
+          generator functions that yield arguments.
+
+        * descriptor -- the function, other than the test, that should be used
+          to construct the test name. This is to support generator functions.
+        """
+
+        self.test = test
+        self.setUpFunc = setUp
+        self.tearDownFunc = tearDown
+        self.arg = arg
+        self.descriptor = descriptor
+        TestBase.__init__(self)
+
+    def address(self):
+        """Return a round-trip name for this test, a name that can be
+        fed back as input to loadTestByName and (assuming the same
+        plugin configuration) result in the loading of this test.
+        """
+        if self.descriptor is not None:
+            return test_address(self.descriptor)
+        else:
+            return test_address(self.test)
+
+    def _context(self):
+        return resolve_name(self.test.__module__)
+    context = property(_context, None, None,
+                      """Get context (module) of this test""")
+
+    def setUp(self):
+        """Run any setup function attached to the test function
+        """
+        if self.setUpFunc:
+            self.setUpFunc()
+        else:
+            # fall back to fixtures attached as attributes of the test
+            names = ('setup', 'setUp', 'setUpFunc')
+            try_run(self.test, names)
+
+    def tearDown(self):
+        """Run any teardown function attached to the test function
+        """
+        if self.tearDownFunc:
+            self.tearDownFunc()
+        else:
+            # fall back to fixtures attached as attributes of the test
+            names = ('teardown', 'tearDown', 'tearDownFunc')
+            try_run(self.test, names)
+
+    def __str__(self):
+        # name is module.func, with generator args appended if present
+        func, arg = self._descriptors()
+        if hasattr(func, 'compat_func_name'):
+            name = func.compat_func_name
+        else:
+            name = func.__name__
+        name = "%s.%s" % (func.__module__, name)
+        if arg:
+            name = "%s%s" % (name, arg)
+        # FIXME need to include the full dir path to disambiguate
+        # in cases where test module of the same name was seen in
+        # another directory (old fromDirectory)
+        return name
+    __repr__ = __str__
+
+    def _descriptors(self):
+        """Get the descriptors of the test function: the function and
+        arguments that will be used to construct the test name. In
+        most cases, this is the function itself and no arguments. For
+        tests generated by generator functions, the original
+        (generator) function and args passed to the generated function
+        are returned.
+        """
+        if self.descriptor:
+            return self.descriptor, self.arg
+        else:
+            return self.test, self.arg
+
+
+class MethodTestCase(TestBase):
+    """Test case wrapper for test methods.
+
+    Don't use this class directly; it is used internally in nose to
+    create test cases for test methods.
+    """
+    __test__ = False # do not collect
+
+    def __init__(self, method, test=None, arg=tuple(), descriptor=None):
+        """Initialize the MethodTestCase.
+
+        Required argument:
+
+        * method -- the method to call, may be bound or unbound. In either
+          case, a new instance of the method's class will be instantiated to
+          make the call.  Note: In Python 3.x, if using an unbound method, you
+          must wrap it using pyversion.unbound_method.
+
+        Optional arguments:
+
+        * test -- the test function to call. If this is passed, it will be
+          called instead of getting a new bound method of the same name as the
+          desired method from the test instance. This is to support generator
+          methods that yield inline functions.
+
+        * arg -- arguments to pass to the test function. This is to support
+          generator methods that yield arguments.
+
+        * descriptor -- the function, other than the test, that should be used
+          to construct the test name. This is to support generator methods.
+        """
+        self.method = method
+        self.test = test
+        self.arg = arg
+        self.descriptor = descriptor
+        # a plain function here means an unbound method that was not
+        # wrapped; im_class below would fail, so reject it up front
+        if isfunction(method):
+            raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase")
+        self.cls = method.im_class
+        self.inst = self.cls()
+        if self.test is None:
+            method_name = self.method.__name__
+            self.test = getattr(self.inst, method_name)
+        TestBase.__init__(self)
+
+    def __str__(self):
+        # name is module.class.method, with generator args if present
+        func, arg = self._descriptors()
+        if hasattr(func, 'compat_func_name'):
+            name = func.compat_func_name
+        else:
+            name = func.__name__
+        name = "%s.%s.%s" % (self.cls.__module__,
+                             self.cls.__name__,
+                             name)
+        if arg:
+            name = "%s%s" % (name, arg)
+        return name
+    __repr__ = __str__
+
+    def address(self):
+        """Return a round-trip name for this test, a name that can be
+        fed back as input to loadTestByName and (assuming the same
+        plugin configuration) result in the loading of this test.
+        """
+        if self.descriptor is not None:
+            return test_address(self.descriptor)
+        else:
+            return test_address(self.method)
+
+    def _context(self):
+        return self.cls
+    context = property(_context, None, None,
+                      """Get context (class) of this test""")
+
+    def setUp(self):
+        """Run any setup method declared in the test class.
+        """
+        try_run(self.inst, ('setup', 'setUp'))
+
+    def tearDown(self):
+        """Run any teardown method declared in the test class.
+        """
+        try_run(self.inst, ('teardown', 'tearDown'))
+
+    def _descriptors(self):
+        """Get the descriptors of the test method: the method and
+        arguments that will be used to construct the test name. In
+        most cases, this is the method itself and no arguments. For
+        tests generated by generator methods, the original
+        (generator) method and args passed to the generated method
+        or function are returned.
+        """
+        if self.descriptor:
+            return self.descriptor, self.arg
+        else:
+            return self.method, self.arg
diff --git a/nose/commands.py b/nose/commands.py
new file mode 100644 (file)
index 0000000..b819b38
--- /dev/null
@@ -0,0 +1,146 @@
+"""
+nosetests setuptools command
+----------------------------
+
+The easiest way to run tests with nose is to use the `nosetests` setuptools
+command::
+
+  python setup.py nosetests
+
+This command has one *major* benefit over the standard `test` command: *all
+nose plugins are supported*.
+
+To configure the `nosetests` command, add a [nosetests] section to your
+setup.cfg. The [nosetests] section can contain any command line arguments that
+nosetests supports. The differences between issuing an option on the command
+line and adding it to setup.cfg are:
+
+* In setup.cfg, the -- prefix must be excluded
+* In setup.cfg, command line flags that take no arguments must be given an
+  argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
+
+Here's an example [nosetests] setup.cfg section::
+
+  [nosetests]
+  verbosity=1
+  detailed-errors=1
+  with-coverage=1
+  cover-package=nose
+  debug=nose.loader
+  pdb=1
+  pdb-failures=1
+
+If you commonly run nosetests with a large number of options, using
+the nosetests setuptools command and configuring with setup.cfg can
+make running your tests much less tedious. (Note that the same options
+and format supported in setup.cfg are supported in all other config
+files, and the nosetests script will also load config files.)
+
+Another reason to run tests with the command is that the command will
+install packages listed in your `tests_require`, as well as doing a
+complete build of your package before running tests. For packages with
+dependencies or that build C extensions, using the setuptools command
+can be more convenient than building by hand and running the nosetests
+script.
+
+Bootstrapping
+-------------
+
+If you are distributing your project and want users to be able to run tests
+without having to install nose themselves, add nose to the setup_requires
+section of your setup()::
+
+  setup(
+      # ...
+      setup_requires=['nose>=1.0']
+      )
+
+This will direct setuptools to download and activate nose during the setup
+process, making the ``nosetests`` command available.
+
+"""
+try:
+    from setuptools import Command
+except ImportError:
+    Command = nosetests = None
+else:
+    from nose.config import Config, option_blacklist, user_config_files, \
+        flag, _bool
+    from nose.core import TestProgram
+    from nose.plugins import DefaultPluginManager
+
+
+    def get_user_options(parser):
+        """convert an optparse option list into a distutils option tuple list"""
+        opt_list = []
+        for opt in parser.option_list:
+            # options blacklisted for config files are not exposed
+            if opt._long_opts[0][2:] in option_blacklist: 
+                continue
+            long_name = opt._long_opts[0][2:]
+            # distutils marks options that take a value with a trailing '='
+            if opt.action not in ('store_true', 'store_false'):
+                long_name = long_name + "="
+            short_name = None
+            if opt._short_opts:
+                short_name =  opt._short_opts[0][1:]
+            opt_list.append((long_name, short_name, opt.help or ""))
+        return opt_list
+
+
+    class nosetests(Command):
+        """setuptools command that runs the test suite with nose,
+        exposing nose's command line options (gathered from the default
+        plugin set and user config files) as distutils options.
+        """
+        description = "Run unit tests using nosetests"
+        # build the option list once, at class definition time
+        __config = Config(files=user_config_files(),
+                          plugins=DefaultPluginManager())
+        __parser = __config.getParser()
+        user_options = get_user_options(__parser)
+
+        def initialize_options(self):
+            """create the member variables, but change hyphens to
+            underscores
+            """
+
+            self.option_to_cmds = {}
+            for opt in self.__parser.option_list:
+                cmd_name = opt._long_opts[0][2:]
+                option_name = cmd_name.replace('-', '_')
+                self.option_to_cmds[option_name] = cmd_name
+                setattr(self, option_name, None)
+            self.attr  = None
+
+        def finalize_options(self):
+            """nothing to do here"""
+            pass
+
+        def run(self):
+            """ensure tests are capable of being run, then
+            run nose.main with a reconstructed argument list"""
+            self.run_command('egg_info')
+
+            # Build extensions in-place
+            self.reinitialize_command('build_ext', inplace=1)
+            self.run_command('build_ext')
+
+            # fetch any eggs the package or its tests require
+            if self.distribution.install_requires:
+                self.distribution.fetch_build_eggs(
+                    self.distribution.install_requires)
+            if self.distribution.tests_require:
+                self.distribution.fetch_build_eggs(
+                    self.distribution.tests_require)
+
+            argv = ['nosetests'] 
+            for (option_name, cmd_name) in self.option_to_cmds.items():
+                if option_name in option_blacklist:
+                    continue
+                value = getattr(self, option_name)
+                if value is not None:
+                    argv.extend(
+                        self.cfgToArg(option_name.replace('_', '-'), value))
+            TestProgram(argv=argv, config=self.__config)
+
+        def cfgToArg(self, optname, value):
+            """convert an (optname, value) pair to command-line argument
+            form; boolean flag values produce a bare --optname (or
+            nothing when false)
+            """
+            argv = []
+            if flag(value):
+                if _bool(value):
+                    argv.append('--' + optname)
+            else:
+                argv.extend(['--' + optname, value])
+            return argv
diff --git a/nose/config.py b/nose/config.py
new file mode 100644 (file)
index 0000000..d787fed
--- /dev/null
@@ -0,0 +1,638 @@
+import logging
+import optparse
+import os
+import re
+import sys
+import ConfigParser
+from optparse import OptionParser
+from nose.util import absdir, tolist
+from nose.plugins.manager import NoPlugins
+from warnings import warn
+
+log = logging.getLogger(__name__)
+
+# not allowed in config files
+option_blacklist = ['help', 'verbose']
+
+config_files = [
+    # Linux users will prefer this
+    "~/.noserc",
+    # Windows users will prefer this
+    "~/nose.cfg"
+    ]
+
+# platforms on which the exe check defaults to off
+# Windows and IronPython
+exe_allowed_platforms = ('win32', 'cli')
+
+
+class NoSuchOptionError(Exception):
+    """Raised when a config file refers to an option name that the
+    option parser does not recognize.
+    """
+    def __init__(self, name):
+        Exception.__init__(self, name)
+        self.name = name
+
+
+class ConfigError(Exception):
+    """Raised when a config file cannot be read or parsed.
+    """
+    pass
+
+
+class ConfiguredDefaultsOptionParser(object):
+    """
+    Handler for options from commandline and config files.
+    """
+    def __init__(self, parser, config_section, error=None, file_error=None):
+        # parser: the optparse parser to delegate to
+        # config_section: config-file section to read options from
+        # error / file_error: callables for reporting problems; both
+        # default to the parser's own error handler
+        self._parser = parser
+        self._config_section = config_section
+        if error is None:
+            error = self._parser.error
+        self._error = error
+        if file_error is None:
+            file_error = lambda msg, **kw: error(msg)
+        self._file_error = file_error
+
+    def _configTuples(self, cfg, filename):
+        """Return (name, value, filename) tuples for every option
+        found in our section of the parsed config file.
+        """
+        config = []
+        if self._config_section in cfg.sections():
+            for name, value in cfg.items(self._config_section):
+                config.append((name, value, filename))
+        return config
+
+    def _readFromFilenames(self, filenames):
+        """Read and merge option tuples from a list of config files.
+        """
+        config = []
+        for filename in filenames:
+            cfg = ConfigParser.RawConfigParser()
+            try:
+                cfg.read(filename)
+            except ConfigParser.Error, exc:
+                raise ConfigError("Error reading config file %r: %s" %
+                                  (filename, str(exc)))
+            config.extend(self._configTuples(cfg, filename))
+        return config
+
+    def _readFromFileObject(self, fh):
+        """Read option tuples from an open file-like object.
+        """
+        cfg = ConfigParser.RawConfigParser()
+        try:
+            filename = fh.name
+        except AttributeError:
+            filename = '<???>'
+        try:
+            cfg.readfp(fh)
+        except ConfigParser.Error, exc:
+            raise ConfigError("Error reading config file %r: %s" %
+                              (filename, str(exc)))
+        return self._configTuples(cfg, filename)
+
+    def _readConfiguration(self, config_files):
+        """Accept a filename, a list of filenames, or an open file
+        object and return the merged option tuples.
+        """
+        # a readline attribute means we were handed a file object
+        try:
+            config_files.readline
+        except AttributeError:
+            filename_or_filenames = config_files
+            if isinstance(filename_or_filenames, basestring):
+                filenames = [filename_or_filenames]
+            else:
+                filenames = filename_or_filenames
+            config = self._readFromFilenames(filenames)
+        else:
+            fh = config_files
+            config = self._readFromFileObject(fh)
+        return config
+
+    def _processConfigValue(self, name, value, values, parser):
+        """Process a single config-file option as if it had been
+        passed on the command line.
+        """
+        opt_str = '--' + name
+        option = parser.get_option(opt_str)
+        if option is None:
+            raise NoSuchOptionError(name)
+        else:
+            option.process(opt_str, value, values, parser)
+
+    def _applyConfigurationToValues(self, parser, config, values):
+        """Apply config-file options to the option values object,
+        reporting unknown options and bad values via file_error.
+        """
+        for name, value, filename in config:
+            if name in option_blacklist:
+                continue
+            try:
+                self._processConfigValue(name, value, values, parser)
+            except NoSuchOptionError, exc:
+                self._file_error(
+                    "Error reading config file %r: "
+                    "no such option %r" % (filename, exc.name),
+                    name=name, filename=filename)
+            except optparse.OptionValueError, exc:
+                msg = str(exc).replace('--' + name, repr(name), 1)
+                self._file_error("Error reading config file %r: "
+                                 "%s" % (filename, msg),
+                                 name=name, filename=filename)
+
+    def parseArgsAndConfigFiles(self, args, config_files):
+        """Parse the command line args, first seeding the defaults
+        from any options found in config_files.
+        """
+        values = self._parser.get_default_values()
+        try:
+            config = self._readConfiguration(config_files)
+        except ConfigError, exc:
+            self._error(str(exc))
+        else:
+            self._applyConfigurationToValues(self._parser, config, values)
+        return self._parser.parse_args(args, values)
+
+
+class Config(object):
+    """nose configuration.
+
+    Instances of Config are used throughout nose to configure
+    behavior, including plugin lists. Here are the default values for
+    all config keys::
+
+      self.env = env = kw.pop('env', {})
+      self.args = ()
+      self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+      self.addPaths = not env.get('NOSE_NOPATH', False)
+      self.configSection = 'nosetests'
+      self.debug = env.get('NOSE_DEBUG')
+      self.debugLog = env.get('NOSE_DEBUG_LOG')
+      self.exclude = None
+      self.getTestCaseNamesCompat = False
+      self.includeExe = env.get('NOSE_INCLUDE_EXE',
+                                sys.platform in exe_allowed_platforms)
+      self.ignoreFiles = (re.compile(r'^\.'),
+                          re.compile(r'^_'),
+                          re.compile(r'^setup\.py$')
+                          )
+      self.include = None
+      self.loggingConfig = None
+      self.logStream = sys.stderr
+      self.options = NoOptions()
+      self.parser = None
+      self.plugins = NoPlugins()
+      self.srcDirs = ('lib', 'src')
+      self.runOnInit = True
+      self.stopOnError = env.get('NOSE_STOP', False)
+      self.stream = sys.stderr
+      self.testNames = ()
+      self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+      self.where = ()
+      self.py3where = ()
+      self.workingDir = None   
+    """
+
+    def __init__(self, **kw):
+        self.env = env = kw.pop('env', {})
+        self.args = ()
+        self.testMatchPat = env.get('NOSE_TESTMATCH',
+                                    r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
+        self.testMatch = re.compile(self.testMatchPat)
+        self.addPaths = not env.get('NOSE_NOPATH', False)
+        self.configSection = 'nosetests'
+        self.debug = env.get('NOSE_DEBUG')
+        self.debugLog = env.get('NOSE_DEBUG_LOG')
+        self.exclude = None
+        self.getTestCaseNamesCompat = False
+        self.includeExe = env.get('NOSE_INCLUDE_EXE',
+                                  sys.platform in exe_allowed_platforms)
+        self.ignoreFilesDefaultStrings = [r'^\.',
+                                          r'^_',
+                                          r'^setup\.py$',
+                                          ]
+        self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
+        self.include = None
+        self.loggingConfig = None
+        self.logStream = sys.stderr
+        self.options = NoOptions()
+        self.parser = None
+        self.plugins = NoPlugins()
+        self.srcDirs = ('lib', 'src')
+        self.runOnInit = True
+        self.stopOnError = env.get('NOSE_STOP', False)
+        self.stream = sys.stderr
+        self.testNames = []
+        self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+        self.where = ()
+        self.py3where = ()
+        self.workingDir = os.getcwd()
+        self.traverseNamespace = False
+        self.firstPackageWins = False
+        self.parserClass = OptionParser
+        self.worker = False
+        
+        self._default = self.__dict__.copy()
+        self.update(kw)
+        self._orig = self.__dict__.copy()
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state['stream']
+        del state['_orig']
+        del state['_default']
+        del state['env']
+        del state['logStream']
+        # FIXME remove plugins, have only plugin manager class
+        state['plugins'] = self.plugins.__class__
+        return state
+
+    def __setstate__(self, state):
+        plugincls = state.pop('plugins')
+        self.update(state)
+        self.worker = True
+        # FIXME won't work for static plugin lists
+        self.plugins = plugincls()
+        self.plugins.loadPlugins()
+        # needed so .can_configure gets set appropriately
+        dummy_parser = self.parserClass()
+        self.plugins.addOptions(dummy_parser, {})
+        self.plugins.configure(self.options, self)
+    
+    def __repr__(self):
+        d = self.__dict__.copy()
+        # don't expose env, could include sensitive info
+        d['env'] = {}
+        keys = [ k for k in d.keys()
+                 if not k.startswith('_') ]
+        keys.sort()
+        return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
+                                          for k in keys ])
+    __str__ = __repr__
+
+    def _parseArgs(self, argv, cfg_files):
+        def warn_sometimes(msg, name=None, filename=None):
+            if (hasattr(self.plugins, 'excludedOption') and
+                self.plugins.excludedOption(name)):
+                msg = ("Option %r in config file %r ignored: "
+                       "excluded by runtime environment" %
+                       (name, filename))
+                warn(msg, RuntimeWarning)
+            else:
+                raise ConfigError(msg)
+        parser = ConfiguredDefaultsOptionParser(
+            self.getParser(), self.configSection, file_error=warn_sometimes)
+        return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
+
+    def configure(self, argv=None, doc=None):
+        """Configure the nose running environment. Execute configure before
+        collecting tests with nose.TestCollector to enable output capture and
+        other features.
+        """
+        env = self.env
+        if argv is None:
+            argv = sys.argv
+
+        cfg_files = getattr(self, 'files', [])
+        options, args = self._parseArgs(argv, cfg_files)
+        # If -c --config has been specified on command line,
+        # load those config files and reparse
+        if getattr(options, 'files', []):
+            options, args = self._parseArgs(argv, options.files)
+
+        self.options = options
+        if args:
+            self.testNames = args
+        if options.testNames is not None:
+            self.testNames.extend(tolist(options.testNames))
+
+        if options.py3where is not None:
+            if sys.version_info >= (3,):
+                options.where = options.py3where
+
+        # `where` is an append action, so it can't have a default value 
+        # in the parser, or that default will always be in the list
+        if not options.where:
+            options.where = env.get('NOSE_WHERE', None)
+
+        # include and exclude also
+        if not options.ignoreFiles:
+            options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
+        if not options.include:
+            options.include = env.get('NOSE_INCLUDE', [])
+        if not options.exclude:
+            options.exclude = env.get('NOSE_EXCLUDE', [])
+
+        self.addPaths = options.addPaths
+        self.stopOnError = options.stopOnError
+        self.verbosity = options.verbosity
+        self.includeExe = options.includeExe
+        self.traverseNamespace = options.traverseNamespace
+        self.debug = options.debug
+        self.debugLog = options.debugLog
+        self.loggingConfig = options.loggingConfig
+        self.firstPackageWins = options.firstPackageWins
+        self.configureLogging()
+
+        if options.where is not None:
+            self.configureWhere(options.where)
+        
+        if options.testMatch:
+            self.testMatch = re.compile(options.testMatch)
+        
+        if options.ignoreFiles:
+            self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
+            log.info("Ignoring files matching %s", options.ignoreFiles)
+        else:
+            log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
+        
+        if options.include:
+            self.include = map(re.compile, tolist(options.include))
+            log.info("Including tests matching %s", options.include)
+
+        if options.exclude:
+            self.exclude = map(re.compile, tolist(options.exclude))
+            log.info("Excluding tests matching %s", options.exclude)
+
+        # When listing plugins we don't want to run them
+        if not options.showPlugins:
+            self.plugins.configure(options, self)
+            self.plugins.begin()
+
+    def configureLogging(self):
+        """Configure logging for nose, or optionally other packages. Any logger
+        name may be set with the debug option, and that logger will be set to
+        debug level and be assigned the same handler as the nose loggers, unless
+        it already has a handler.
+        """
+        if self.loggingConfig:
+            from logging.config import fileConfig
+            fileConfig(self.loggingConfig)
+            return
+        
+        format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
+        if self.debugLog:
+            handler = logging.FileHandler(self.debugLog)
+        else:
+            handler = logging.StreamHandler(self.logStream)
+        handler.setFormatter(format)
+
+        logger = logging.getLogger('nose')
+        logger.propagate = 0
+
+        # only add our default handler if there isn't already one there
+        # this avoids annoying duplicate log messages.
+        if handler not in logger.handlers:
+            logger.addHandler(handler)
+
+        # default level    
+        lvl = logging.WARNING
+        if self.verbosity >= 5:
+            lvl = 0
+        elif self.verbosity >= 4:
+            lvl = logging.DEBUG
+        elif self.verbosity >= 3:
+            lvl = logging.INFO
+        logger.setLevel(lvl)
+
+        # individual overrides
+        if self.debug:
+            # no blanks
+            debug_loggers = [ name for name in self.debug.split(',')
+                              if name ]
+            for logger_name in debug_loggers:
+                l = logging.getLogger(logger_name)
+                l.setLevel(logging.DEBUG)
+                if not l.handlers and not logger_name.startswith('nose'):
+                    l.addHandler(handler)
+
+    def configureWhere(self, where):
+        """Configure the working directory or directories for the test run.
+        """
+        from nose.importer import add_path
+        self.workingDir = None
+        where = tolist(where)
+        warned = False
+        for path in where:
+            if not self.workingDir:
+                abs_path = absdir(path)
+                if abs_path is None:
+                    raise ValueError("Working directory %s not found, or "
+                                     "not a directory" % path)
+                log.info("Set working dir to %s", abs_path)
+                self.workingDir = abs_path
+                if self.addPaths and \
+                       os.path.exists(os.path.join(abs_path, '__init__.py')):
+                    log.info("Working directory %s is a package; "
+                             "adding to sys.path" % abs_path)
+                    add_path(abs_path)
+                continue
+            if not warned:
+                warn("Use of multiple -w arguments is deprecated and "
+                     "support may be removed in a future release. You can "
+                     "get the same behavior by passing directories without "
+                     "the -w argument on the command line, or by using the "
+                     "--tests argument in a configuration file.",
+                     DeprecationWarning)
+            self.testNames.append(path)
+
    def default(self):
        """Reset all config values to defaults.
        """
        # _default is assumed to be a snapshot of the instance dict holding
        # the default settings -- confirm where it is created (its assignment
        # is not visible in this part of the file).
        self.__dict__.update(self._default)
+
    def getParser(self, doc=None):
        """Get the command line option parser.

        The parser is built once and cached on self.parser; ``doc``
        becomes the parser's usage text.  Core nose options are
        registered first, then plugins are loaded and given the chance
        to add their own options via pluginOpts().
        """
        if self.parser:
            return self.parser
        env = self.env
        # NOTE(review): `env` appears unused in the rest of this method --
        # confirm before removing.
        parser = self.parserClass(doc)
        parser.add_option(
            "-V","--version", action="store_true",
            dest="version", default=False,
            help="Output nose version and exit")
        parser.add_option(
            "-p", "--plugins", action="store_true",
            dest="showPlugins", default=False,
            help="Output list of available plugins and exit. Combine with "
            "higher verbosity for greater detail")
        parser.add_option(
            "-v", "--verbose",
            action="count", dest="verbosity",
            default=self.verbosity,
            help="Be more verbose. [NOSE_VERBOSE]")
        parser.add_option(
            "--verbosity", action="store", dest="verbosity",
            metavar='VERBOSITY',
            type="int", help="Set verbosity; --verbosity=2 is "
            "the same as -v")
        parser.add_option(
            "-q", "--quiet", action="store_const", const=0, dest="verbosity",
            help="Be less verbose")
        parser.add_option(
            "-c", "--config", action="append", dest="files",
            metavar="FILES",
            help="Load configuration from config file(s). May be specified "
            "multiple times; in that case, all config files will be "
            "loaded and combined")
        parser.add_option(
            "-w", "--where", action="append", dest="where",
            metavar="WHERE",
            help="Look for tests in this directory. "
            "May be specified multiple times. The first directory passed "
            "will be used as the working directory, in place of the current "
            "working directory, which is the default. Others will be added "
            "to the list of tests to execute. [NOSE_WHERE]"
            )
        parser.add_option(
            "--py3where", action="append", dest="py3where",
            metavar="PY3WHERE",
            help="Look for tests in this directory under Python 3.x. "
            "Functions the same as 'where', but only applies if running under "
            "Python 3.x or above.  Note that, if present under 3.x, this "
            "option completely replaces any directories specified with "
            "'where', so the 'where' option becomes ineffective. "
            "[NOSE_PY3WHERE]"
            )
        parser.add_option(
            "-m", "--match", "--testmatch", action="store",
            dest="testMatch", metavar="REGEX",
            help="Files, directories, function names, and class names "
            "that match this regular expression are considered tests.  "
            "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
            default=self.testMatchPat)
        parser.add_option(
            "--tests", action="store", dest="testNames", default=None,
            metavar='NAMES',
            help="Run these tests (comma-separated list). This argument is "
            "useful mainly from configuration files; on the command line, "
            "just pass the tests to run as additional arguments with no "
            "switch.")
        parser.add_option(
            "-l", "--debug", action="store",
            dest="debug", default=self.debug,
            help="Activate debug logging for one or more systems. "
            "Available debug loggers: nose, nose.importer, "
            "nose.inspector, nose.plugins, nose.result and "
            "nose.selector. Separate multiple names with a comma.")
        parser.add_option(
            "--debug-log", dest="debugLog", action="store",
            default=self.debugLog, metavar="FILE",
            help="Log debug messages to this file "
            "(default: sys.stderr)")
        parser.add_option(
            "--logging-config", "--log-config",
            dest="loggingConfig", action="store",
            default=self.loggingConfig, metavar="FILE",
            help="Load logging config from this file -- bypasses all other"
            " logging config settings.")
        parser.add_option(
            "-I", "--ignore-files", action="append", dest="ignoreFiles",
            metavar="REGEX",
            help="Completely ignore any file that matches this regular "
            "expression. Takes precedence over any other settings or "
            "plugins. "
            "Specifying this option will replace the default setting. "
            "Specify this option multiple times "
            "to add more regular expressions [NOSE_IGNORE_FILES]")
        parser.add_option(
            "-e", "--exclude", action="append", dest="exclude",
            metavar="REGEX",
            help="Don't run tests that match regular "
            "expression [NOSE_EXCLUDE]")
        parser.add_option(
            "-i", "--include", action="append", dest="include",
            metavar="REGEX",
            help="This regular expression will be applied to files, "
            "directories, function names, and class names for a chance "
            "to include additional tests that do not match TESTMATCH.  "
            "Specify this option multiple times "
            "to add more regular expressions [NOSE_INCLUDE]")
        parser.add_option(
            "-x", "--stop", action="store_true", dest="stopOnError",
            default=self.stopOnError,
            help="Stop running tests after the first error or failure")
        parser.add_option(
            "-P", "--no-path-adjustment", action="store_false",
            dest="addPaths",
            default=self.addPaths,
            help="Don't make any changes to sys.path when "
            "loading tests [NOSE_NOPATH]")
        parser.add_option(
            "--exe", action="store_true", dest="includeExe",
            default=self.includeExe,
            help="Look for tests in python modules that are "
            "executable. Normal behavior is to exclude executable "
            "modules, since they may not be import-safe "
            "[NOSE_INCLUDE_EXE]")
        parser.add_option(
            "--noexe", action="store_false", dest="includeExe",
            help="DO NOT look for tests in python modules that are "
            "executable. (The default on the windows platform is to "
            "do so.)")
        parser.add_option(
            "--traverse-namespace", action="store_true",
            default=self.traverseNamespace, dest="traverseNamespace",
            help="Traverse through all path entries of a namespace package")
        parser.add_option(
            "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
            action="store_true", default=False, dest="firstPackageWins",
            help="nose's importer will normally evict a package from sys."
            "modules if it sees a package with the same name in a different "
            "location. Set this option to disable that behavior.")

        # Plugins register their own options after the core ones.
        self.plugins.loadPlugins()
        self.pluginOpts(parser)

        self.parser = parser
        return parser
+
+    def help(self, doc=None):
+        """Return the generated help message
+        """
+        return self.getParser(doc).format_help()
+
    def pluginOpts(self, parser):
        """Give loaded plugins the chance to register their command line
        options on *parser*, passing the current environment along.
        """
        self.plugins.addOptions(parser, self.env)
+
    def reset(self):
        """Restore all values from the _orig snapshot (presumably taken
        before any configuration was applied -- confirm where _orig is
        assigned; it is not visible in this part of the file).
        """
        self.__dict__.update(self._orig)
+
    def todict(self):
        """Return a shallow copy of the current configuration values."""
        return self.__dict__.copy()
+        
    def update(self, d):
        """Update configuration values in place from the dict *d*."""
        self.__dict__.update(d)
+
+
class NoOptions(object):
    """Options container that returns None for all options.
    """
    # Pickling support: the object is stateless, so serialize nothing.
    def __getstate__(self):
        return {}

    def __setstate__(self, state):
        pass

    def __getnewargs__(self):
        return ()

    # Every attribute lookup (i.e. every option) resolves to None.
    def __getattr__(self, attr):
        return None

    # Python 2 truth hook: instances are always falsy.
    def __nonzero__(self):
        return False
+
+
def user_config_files():
    """Return the user-level config file paths that actually exist on
    disk, after ~-expansion.
    """
    expanded = [os.path.expanduser(name) for name in config_files]
    return [path for path in expanded if os.path.exists(path)]
+
+
def all_config_files():
    """Return existing user config files, plus setup.cfg when the
    current working directory contains one.
    """
    found = user_config_files()
    if not os.path.exists('setup.cfg'):
        return found
    return found + ['setup.cfg']
+
+
+# used when parsing config files
def flag(val):
    """Does the value look like an on/off flag?"""
    # Numeric (and boolean) one/zero short-circuit before string handling.
    if val == 1:
        return True
    if val == 0:
        return False
    text = str(val)
    if len(text) > 5:
        return False
    return text.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
+
+
+def _bool(val):
+    return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
diff --git a/nose/core.py b/nose/core.py
new file mode 100644 (file)
index 0000000..e219903
--- /dev/null
@@ -0,0 +1,324 @@
+"""Implements nose test program and collector.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import time
+import unittest
+
+from nose.config import Config, all_config_files
+from nose.loader import defaultTestLoader
+from nose.plugins.manager import PluginManager, DefaultPluginManager, \
+     RestrictedPluginManager
+from nose.result import TextTestResult
+from nose.suite import FinalizingSuiteWrapper
+from nose.util import isclass, tolist
+
+
+log = logging.getLogger('nose.core')
+compat_24 = sys.version_info >= (2, 4)
+
+__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
+           'TextTestRunner']
+
+            
class TextTestRunner(unittest.TextTestRunner):
    """Test runner built on nose's TextTestResult.

    Enables errorClasses and gives plugins hooks to wrap the test, to
    replace or decorate the output stream, and to finalize the result.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
                 config=None):
        # A default Config keeps the plugin hooks callable even when the
        # caller supplies no configuration.
        if config is None:
            config = Config()
        self.config = config
        unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)

    def _makeResult(self):
        # Result class that understands errorClasses and plugin output.
        return TextTestResult(self.stream,
                              self.descriptions,
                              self.verbosity,
                              self.config)

    def run(self, test):
        """Run the suite, deferring all output to the result class and
        invoking the plugin hooks around the run.
        """
        plugins = self.config.plugins
        # Plugins may decorate or replace the test before it runs.
        wrapped_test = plugins.prepareTest(test)
        if wrapped_test is not None:
            test = wrapped_test
        # Plugins may decorate or capture the output stream.
        new_stream = plugins.setOutputStream(self.stream)
        if new_stream is not None:
            self.stream = new_stream
        result = self._makeResult()
        started = time.time()
        test(result)
        finished = time.time()
        result.printErrors()
        result.printSummary(started, finished)
        plugins.finalize(result)
        return result
+
+    
+class TestProgram(unittest.TestProgram):
+    """Collect and run tests, returning success or failure.
+
+    The arguments to TestProgram() are the same as to
+    :func:`main()` and :func:`run()`:
+
+    * module: All tests are in this module (default: None)
+    * defaultTest: Tests to load (default: '.')
+    * argv: Command line arguments (default: None; sys.argv is read)
+    * testRunner: Test runner instance (default: None)
+    * testLoader: Test loader instance (default: None)
+    * env: Environment; ignored if config is provided (default: None;
+      os.environ is read)
+    * config: :class:`nose.config.Config` instance (default: None)
+    * suite: Suite or list of tests to run (default: None). Passing a
+      suite or lists of tests will bypass all test discovery and
+      loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+      instance as the suite, context fixtures at the class, module and
+      package level will not be used, and many plugin hooks will not
+      be called. If you want normal nose behavior, either pass a list
+      of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+    * exit: Exit after running tests and printing report (default: True)
+    * plugins: List of plugins to use; ignored if config is provided
+      (default: load plugins with DefaultPluginManager)
+    * addplugins: List of **extra** plugins to use. Pass a list of plugin
+      instances in this argument to make custom plugins available while
+      still using the DefaultPluginManager.
+    """
+    verbosity = 1
+
+    def __init__(self, module=None, defaultTest='.', argv=None,
+                 testRunner=None, testLoader=None, env=None, config=None,
+                 suite=None, exit=True, plugins=None, addplugins=None):
+        if env is None:
+            env = os.environ
+        if config is None:
+            config = self.makeConfig(env, plugins)
+        if addplugins:
+            config.plugins.addPlugins(addplugins)
+        self.config = config
+        self.suite = suite
+        self.exit = exit
+        extra_args = {}
+        version = sys.version_info[0:2]
+        if version >= (2,7) and version != (3,0):
+            extra_args['exit'] = exit
+        unittest.TestProgram.__init__(
+            self, module=module, defaultTest=defaultTest,
+            argv=argv, testRunner=testRunner, testLoader=testLoader,
+            **extra_args)
+
+    def makeConfig(self, env, plugins=None):
+        """Load a Config, pre-filled with user config files if any are
+        found.
+        """
+        cfg_files = all_config_files()        
+        if plugins:
+            manager = PluginManager(plugins=plugins)
+        else:
+            manager = DefaultPluginManager()
+        return Config(
+            env=env, files=cfg_files, plugins=manager)
+        
+    def parseArgs(self, argv):
+        """Parse argv and env and configure running environment.
+        """
+        self.config.configure(argv, doc=self.usage())
+        log.debug("configured %s", self.config)
+
+        # quick outs: version, plugins (optparse would have already
+        # caught and exited on help)
+        if self.config.options.version:
+            from nose import __version__
+            sys.stdout = sys.__stdout__
+            print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
+            sys.exit(0)
+
+        if self.config.options.showPlugins:
+            self.showPlugins()
+            sys.exit(0)
+        
+        if self.testLoader is None:
+            self.testLoader = defaultTestLoader(config=self.config)
+        elif isclass(self.testLoader):
+            self.testLoader = self.testLoader(config=self.config)
+        plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
+        if plug_loader is not None:
+            self.testLoader = plug_loader
+        log.debug("test loader is %s", self.testLoader)
+        
+        # FIXME if self.module is a string, add it to self.testNames? not sure
+
+        if self.config.testNames:
+            self.testNames = self.config.testNames
+        else:
+            self.testNames = tolist(self.defaultTest)
+        log.debug('defaultTest %s', self.defaultTest)
+        log.debug('Test names are %s', self.testNames)
+        if self.config.workingDir is not None:
+            os.chdir(self.config.workingDir)
+        self.createTests()
+        
+    def createTests(self):
+        """Create the tests to run. If a self.suite
+        is set, then that suite will be used. Otherwise, tests will be
+        loaded from the given test names (self.testNames) using the
+        test loader.
+        """
+        log.debug("createTests called with %s", self.suite)
+        if self.suite is not None:
+            # We were given an explicit suite to run. Make sure it's
+            # loaded and wrapped correctly.
+            self.test = self.testLoader.suiteClass(self.suite)
+        else:
+            self.test = self.testLoader.loadTestsFromNames(self.testNames)
+
+    def runTests(self):
+        """Run Tests. Returns true on success, false on failure, and sets
+        self.success to the same value.
+        """
+        log.debug("runTests called")
+        if self.testRunner is None:
+            self.testRunner = TextTestRunner(stream=self.config.stream,
+                                             verbosity=self.config.verbosity,
+                                             config=self.config)
+        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+        if plug_runner is not None:
+            self.testRunner = plug_runner
+        result = self.testRunner.run(self.test)
+        self.success = result.wasSuccessful()
+        if self.exit:
+            sys.exit(not self.success)
+        return self.success
+
+    def showPlugins(self):
+        """Print list of available plugins.
+        """
+        import textwrap
+
+        class DummyParser:
+            def __init__(self):
+                self.options = []
+            def add_option(self, *arg, **kw):
+                self.options.append((arg, kw.pop('help', '')))
+        
+        v = self.config.verbosity
+        self.config.plugins.sort()
+        for p in self.config.plugins:            
+            print "Plugin %s" % p.name
+            if v >= 2:
+                print "  score: %s" % p.score
+                print '\n'.join(textwrap.wrap(p.help().strip(),
+                                              initial_indent='  ',
+                                              subsequent_indent='  '))
+                if v >= 3:
+                    parser = DummyParser()
+                    p.addOptions(parser)
+                    if len(parser.options):
+                        print
+                        print "  Options:"
+                        for opts, help in parser.options:
+                            print '  %s' % (', '.join(opts))
+                            if help:
+                                print '\n'.join(
+                                    textwrap.wrap(help.strip(),
+                                                  initial_indent='    ',
+                                                  subsequent_indent='    '))
+                print
+            
+    def usage(cls):
+        import nose
+        if hasattr(nose, '__loader__'):
+            ld = nose.__loader__
+            if hasattr(ld, 'zipfile'):
+                # nose was imported from a zipfile
+                return ld.get_data(
+                        os.path.join(ld.prefix, 'nose', 'usage.txt'))
+        return open(os.path.join(
+                os.path.dirname(__file__), 'usage.txt'), 'r').read()
+    usage = classmethod(usage)
+
+# backwards compatibility
+run_exit = main = TestProgram
+
+
def run(*arg, **kw):
    """Collect and run tests, returning success or failure.

    Accepts the same arguments as :func:`main` / :class:`TestProgram`
    (module, defaultTest, argv, testRunner, testLoader, env, config,
    suite, plugins, addplugins), with one difference: the ``exit``
    argument is always forced to False, so the calling process is never
    terminated.  Returns True when the test run succeeded.
    """
    # Override any caller-supplied exit flag.
    kw['exit'] = False
    program = TestProgram(*arg, **kw)
    return program.success
+
+
def runmodule(name='__main__', **kw):
    """Collect and run tests in a single module only. Defaults to running
    tests in __main__. Additional arguments to TestProgram may be passed
    as keyword arguments.
    """
    # main is an alias for TestProgram; restricting defaultTest to the
    # named module limits collection to that module.
    main(defaultTest=name, **kw)
+
+
def collector():
    """TestSuite replacement entry point.

    Use anywhere you might use a unittest.TestSuite. Options are loaded
    from all config files; tests are loaded from the configured
    testNames, or from '.' when none are configured.
    """
    # Plugins implementing any of these hooks are disabled: we do not
    # control the test runner here, so those hooks could never run.
    # finalize() is not called either, but plugins that use it stay
    # enabled because capture needs it.
    setuptools_incompat = ('report', 'prepareTest',
                           'prepareTestLoader', 'prepareTestRunner',
                           'setOutputStream')

    plugins = RestrictedPluginManager(exclude=setuptools_incompat)
    conf = Config(files=all_config_files(), plugins=plugins)
    conf.configure(argv=['collector'])
    loader = defaultTestLoader(conf)

    names = conf.testNames
    if not names:
        names = ('.',)
    suite = loader.loadTestsFromNames(names)
    return FinalizingSuiteWrapper(suite, plugins.finalize)
+
+
+
+if __name__ == '__main__':
+    main()
diff --git a/nose/exc.py b/nose/exc.py
new file mode 100644 (file)
index 0000000..8b780db
--- /dev/null
@@ -0,0 +1,9 @@
+"""Exceptions for marking tests as skipped or deprecated.
+
+This module exists to provide backwards compatibility with previous
+versions of nose where skipped and deprecated tests were core
+functionality, rather than being provided by plugins. It may be
+removed in a future release.
+"""
+from nose.plugins.skip import SkipTest
+from nose.plugins.deprecated import DeprecatedTest
diff --git a/nose/ext/__init__.py b/nose/ext/__init__.py
new file mode 100644 (file)
index 0000000..5fd1516
--- /dev/null
@@ -0,0 +1,3 @@
+"""
+External or vendor files
+"""
diff --git a/nose/ext/dtcompat.py b/nose/ext/dtcompat.py
new file mode 100644 (file)
index 0000000..332cf08
--- /dev/null
@@ -0,0 +1,2272 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+#     Jim Fulton
+#     Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+#
+# Modified for inclusion in nose to provide support for DocFileTest in
+# python 2.3:
+#
+# - all doctests removed from module (they fail under 2.3 and 2.5) 
+# - now handles the $py.class extension when ran under Jython
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+    import doctest
+    doctest.testmod()
+
+if __name__ == "__main__":
+    _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False".  In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests.  There are also many ways to override parts
+of doctest's default behaviors.  See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+    # 0, Option Flags
+    'register_optionflag',
+    'DONT_ACCEPT_TRUE_FOR_1',
+    'DONT_ACCEPT_BLANKLINE',
+    'NORMALIZE_WHITESPACE',
+    'ELLIPSIS',
+    'IGNORE_EXCEPTION_DETAIL',
+    'COMPARISON_FLAGS',
+    'REPORT_UDIFF',
+    'REPORT_CDIFF',
+    'REPORT_NDIFF',
+    'REPORT_ONLY_FIRST_FAILURE',
+    'REPORTING_FLAGS',
+    # 1. Utility Functions
+    'is_private',
+    # 2. Example & DocTest
+    'Example',
+    'DocTest',
+    # 3. Doctest Parser
+    'DocTestParser',
+    # 4. Doctest Finder
+    'DocTestFinder',
+    # 5. Doctest Runner
+    'DocTestRunner',
+    'OutputChecker',
+    'DocTestFailure',
+    'UnexpectedException',
+    'DebugRunner',
+    # 6. Test Functions
+    'testmod',
+    'testfile',
+    'run_docstring_examples',
+    # 7. Tester
+    'Tester',
+    # 8. Unittest Support
+    'DocTestSuite',
+    'DocFileSuite',
+    'set_unittest_reportflags',
+    # 9. Debugging Support
+    'script_from_examples',
+    'testsource',
+    'debug_src',
+    'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+                        __name__, 0)
+
+# There are 4 basic classes:
+#  - Example: a <source, want> pair, plus an intra-docstring line number.
+#  - DocTest: a collection of examples, parsed from a docstring, plus
+#    info about where the docstring came from (name, filename, lineno).
+#  - DocTestFinder: extracts DocTests from a given object's docstring and
+#    its contained objects' docstrings.
+#  - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+#                             list of:
+# +------+                   +---------+                   +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+                   +---------+                   +-------+
+#                            | Example |
+#                            |   ...   |
+#                            | Example |
+#                            +---------+
+
+# Option constants.
+
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
    # Known names keep their existing flag; a new name is assigned the
    # next unused bit.
    try:
        return OPTIONFLAGS_BY_NAME[name]
    except KeyError:
        new_flag = 1 << len(OPTIONFLAGS_BY_NAME)
        OPTIONFLAGS_BY_NAME[name] = new_flag
        return new_flag
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+                    DONT_ACCEPT_BLANKLINE |
+                    NORMALIZE_WHITESPACE |
+                    ELLIPSIS |
+                    IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+                   REPORT_CDIFF |
+                   REPORT_NDIFF |
+                   REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+#  1. Utility Functions
+#  2. Example & DocTest -- store test cases
+#  3. DocTest Parser -- extracts examples from strings
+#  4. DocTest Finder -- extracts test cases from objects
+#  5. DocTest Runner -- runs test cases
+#  6. Test Functions -- convenient wrappers for testing
+#  7. Tester Class -- for backwards compatibility
+#  8. Unittest Support
+#  9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+    """prefix, base -> true iff name prefix + "." + base is "private".
+
+    Prefix may be an empty string, and base does not contain a period.
+    Prefix is ignored (although functions you write conforming to this
+    protocol may make use of it).
+    Return true iff base begins with an (at least one) underscore, but
+    does not both begin and end with (at least) two underscores.
+    """
+    # Deprecated: kept only for backward compatibility; every call
+    # emits a DeprecationWarning pointing at the caller (stacklevel=2).
+    warnings.warn("is_private is deprecated; it wasn't useful; "
+                  "examine DocTestFinder.find() lists instead",
+                  DeprecationWarning, stacklevel=2)
+    # Private: starts with "_" but is not a dunder name like "__init__".
+    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+    """
+    Return the compiler-flags associated with the future features that
+    have been imported into the given namespace (globs).
+    """
+    flags = 0
+    for fname in __future__.all_feature_names:
+        feature = globs.get(fname, None)
+        # Only count the name if it is bound to the genuine feature
+        # object from __future__, not to some unrelated value.
+        if feature is getattr(__future__, fname):
+            flags |= feature.compiler_flag
+    return flags
+
+def _normalize_module(module, depth=2):
+    """
+    Return the module specified by `module`.  In particular:
+      - If `module` is a module, then return module.
+      - If `module` is a string, then import and return the
+        module with that name.
+      - If `module` is None, then return the calling module.
+        The calling module is assumed to be the module of
+        the stack frame at the given depth in the call stack.
+
+    Raises TypeError for any other argument type.
+    """
+    if inspect.ismodule(module):
+        return module
+    # NOTE: `unicode` is Python 2-only; this module is a vendored py2
+    # copy of stdlib doctest.
+    elif isinstance(module, (str, unicode)):
+        # fromlist=["*"] forces __import__ to return the leaf module
+        # rather than the top-level package.
+        return __import__(module, globals(), locals(), ["*"])
+    elif module is None:
+        # Walk `depth` frames up the stack to find the caller's module.
+        return sys.modules[sys._getframe(depth).f_globals['__name__']]
+    else:
+        raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+    """
+    Add the given number of space characters to the beginning every
+    non-blank line in `s`, and return the result.
+    """
+    # This regexp matches the start of non-blank lines:
+    # (?m) makes ^ match at every line start; (?!$) skips empty lines.
+    return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+    """
+    Return a string containing a traceback message for the given
+    exc_info tuple (as returned by sys.exc_info()).
+    """
+    # Get a traceback message.  Render into an in-memory buffer rather
+    # than stderr so the caller can embed it in a failure report.
+    excout = StringIO()
+    exc_type, exc_val, exc_tb = exc_info
+    traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+    return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+    """StringIO stand-in for sys.stdout used while running examples.
+
+    Guarantees a trailing newline on captured output and clears the
+    Python 2 `softspace` attribute that `print` with a trailing comma
+    would otherwise leak into the next example.
+    """
+    def getvalue(self):
+        result = StringIO.getvalue(self)
+        # If anything at all was written, make sure there's a trailing
+        # newline.  There's no way for the expected output to indicate
+        # that a trailing newline is missing.
+        if result and not result.endswith("\n"):
+            result += "\n"
+        # Prevent softspace from screwing up the next test case, in
+        # case they used print with a trailing comma in an example.
+        if hasattr(self, "softspace"):
+            del self.softspace
+        return result
+
+    def truncate(self,   size=None):
+        # Delegate to StringIO, then drop any lingering softspace state.
+        StringIO.truncate(self, size)
+        if hasattr(self, "softspace"):
+            del self.softspace
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+    """Return True iff `got` matches `want`, where each '...' in `want`
+    may stand for any (possibly empty) substring of `got`."""
+    if ELLIPSIS_MARKER not in want:
+        return want == got
+
+    # Find "the real" strings.
+    ws = want.split(ELLIPSIS_MARKER)
+    assert len(ws) >= 2
+
+    # Deal with exact matches possibly needed at one or both ends.
+    startpos, endpos = 0, len(got)
+    w = ws[0]
+    if w:   # starts with exact match
+        if got.startswith(w):
+            startpos = len(w)
+            del ws[0]
+        else:
+            return False
+    w = ws[-1]
+    if w:   # ends with exact match
+        if got.endswith(w):
+            endpos -= len(w)
+            del ws[-1]
+        else:
+            return False
+
+    if startpos > endpos:
+        # Exact end matches required more characters than we have, as in
+        # _ellipsis_match('aa...aa', 'aaa')
+        return False
+
+    # For the rest, we only need to find the leftmost non-overlapping
+    # match for each piece.  If there's no overall match that way alone,
+    # there's no overall match period.
+    for w in ws:
+        # w may be '' at times, if there are consecutive ellipses, or
+        # due to an ellipsis at the start or end of `want`.  That's OK.
+        # Search for an empty string succeeds, and doesn't change startpos.
+        startpos = got.find(w, startpos, endpos)
+        if startpos < 0:
+            return False
+        startpos += len(w)
+
+    return True
+
+def _comment_line(line):
+    "Return a commented form of the given line"
+    line = line.rstrip()
+    if line:
+        return '# '+line
+    else:
+        # Blank lines get a bare '#' (no trailing space).
+        return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+    """
+    A specialized version of the python debugger that redirects stdout
+    to a given stream when interacting with the user.  Stdout is *not*
+    redirected when traced code is executed.
+    """
+    def __init__(self, out):
+        # `out` is the stream that receives the debugger's own output
+        # (prompts, command results) during interaction.
+        self.__out = out
+        pdb.Pdb.__init__(self)
+
+    def trace_dispatch(self, *args):
+        # Redirect stdout to the given stream.
+        save_stdout = sys.stdout
+        sys.stdout = self.__out
+        # Call Pdb's trace dispatch method.
+        try:
+            return pdb.Pdb.trace_dispatch(self, *args)
+        finally:
+            # Always restore the real stdout, even if dispatch raises.
+            sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+    if not inspect.ismodule(module):
+        raise TypeError, 'Expected a module: %r' % module
+    if path.startswith('/'):
+        raise ValueError, 'Module-relative files may not have absolute paths'
+
+    # Find the base directory for the path.
+    if hasattr(module, '__file__'):
+        # A normal module/package
+        basedir = os.path.split(module.__file__)[0]
+    elif module.__name__ == '__main__':
+        # An interactive session.
+        if len(sys.argv)>0 and sys.argv[0] != '':
+            basedir = os.path.split(sys.argv[0])[0]
+        else:
+            basedir = os.curdir
+    else:
+        # A module w/o __file__ (this includes builtins)
+        raise ValueError("Can't resolve paths relative to the module " +
+                         module + " (it has no __file__)")
+
+    # Combine the base directory and the path.
+    return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+##   fragment of source code, and "want" is the expected output for
+##   "source."  The Example class also includes information about
+##   where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+##   a string (such as an object's docstring).  The DocTest class also
+##   includes information about where the string was extracted from.
+
+class Example:
+    """
+    A single doctest example, consisting of source code and expected
+    output.  `Example` defines the following attributes:
+
+      - source: A single Python statement, always ending with a newline.
+        The constructor adds a newline if needed.
+
+      - want: The expected output from running the source code (either
+        from stdout, or a traceback in case of exception).  `want` ends
+        with a newline unless it's empty, in which case it's an empty
+        string.  The constructor adds a newline if needed.
+
+      - exc_msg: The exception message generated by the example, if
+        the example is expected to generate an exception; or `None` if
+        it is not expected to generate an exception.  This exception
+        message is compared against the return value of
+        `traceback.format_exception_only()`.  `exc_msg` ends with a
+        newline unless it's `None`.  The constructor adds a newline
+        if needed.
+
+      - lineno: The line number within the DocTest string containing
+        this Example where the Example begins.  This line number is
+        zero-based, with respect to the beginning of the DocTest.
+
+      - indent: The example's indentation in the DocTest string.
+        I.e., the number of space characters that precede the
+        example's first prompt.
+
+      - options: A dictionary mapping from option flags to True or
+        False, which is used to override default options for this
+        example.  Any option flags not contained in this dictionary
+        are left at their default value (as specified by the
+        DocTestRunner's optionflags).  By default, no options are set.
+    """
+    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+                 options=None):
+        """Store the example, normalizing `source`, `want`, and
+        `exc_msg` to end with a newline (when non-empty/non-None)."""
+        # Normalize inputs.
+        if not source.endswith('\n'):
+            source += '\n'
+        if want and not want.endswith('\n'):
+            want += '\n'
+        if exc_msg is not None and not exc_msg.endswith('\n'):
+            exc_msg += '\n'
+        # Store properties.
+        self.source = source
+        self.want = want
+        self.lineno = lineno
+        self.indent = indent
+        # Avoid a shared mutable default: each Example gets its own dict.
+        if options is None: options = {}
+        self.options = options
+        self.exc_msg = exc_msg
+
+class DocTest:
+    """
+    A collection of doctest examples that should be run in a single
+    namespace.  Each `DocTest` defines the following attributes:
+
+      - examples: the list of examples.
+
+      - globs: The namespace (aka globals) that the examples should
+        be run in.
+
+      - name: A name identifying the DocTest (typically, the name of
+        the object whose docstring this DocTest was extracted from).
+
+      - filename: The name of the file that this DocTest was extracted
+        from, or `None` if the filename is unknown.
+
+      - lineno: The line number within filename where this DocTest
+        begins, or `None` if the line number is unavailable.  This
+        line number is zero-based, with respect to the beginning of
+        the file.
+
+      - docstring: The string that the examples were extracted from,
+        or `None` if the string is unavailable.
+    """
+    def __init__(self, examples, globs, name, filename, lineno, docstring):
+        """
+        Create a new DocTest containing the given examples.  The
+        DocTest's globals are initialized with a copy of `globs`.
+        """
+        # Guard against the pre-2.4 API where a string was passed here.
+        assert not isinstance(examples, basestring), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        # Copy so examples can mutate their namespace without side effects.
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<DocTest %s from %s:%s (%s)>' %
+                (self.name, self.filename, self.lineno, examples))
+
+
+    # This lets us sort tests by name:
+    def __cmp__(self, other):
+        # Non-DocTest objects always sort after DocTests.
+        if not isinstance(other, DocTest):
+            return -1
+        # id() is the final tie-breaker so distinct tests never compare
+        # equal even when name/filename/lineno all match.
+        return cmp((self.name, self.filename, self.lineno, id(self)),
+                   (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .*$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
+        min_indent = self._min_indent(string)
+        if min_indent > 0:
+            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+        output = []
+        # charno/lineno track our position in `string` between matches.
+        charno, lineno = 0, 0
+        # Find all doctest examples in the string:
+        for m in self._EXAMPLE_RE.finditer(string):
+            # Add the pre-example text to `output`.
+            output.append(string[charno:m.start()])
+            # Update lineno (lines before this example)
+            lineno += string.count('\n', charno, m.start())
+            # Extract info from the regexp match.
+            (source, options, want, exc_msg) = \
+                     self._parse_example(m, name, lineno)
+            # Create an Example, and add it to the list.  Blank or
+            # comment-only sources (e.g. ">>> # just a comment") are
+            # kept out of the example list.
+            if not self._IS_BLANK_OR_COMMENT(source):
+                output.append( Example(source, want, exc_msg,
+                                    lineno=lineno,
+                                    indent=min_indent+len(m.group('indent')),
+                                    options=options) )
+            # Update lineno (lines inside this example)
+            lineno += string.count('\n', m.start(), m.end())
+            # Update charno.
+            charno = m.end()
+        # Add any remaining post-example text to `output`.
+        output.append(string[charno:])
+        return output
+
+    def get_doctest(self, string, globs, name, filename, lineno):
+        """
+        Extract all doctest examples from the given string, and
+        collect them into a `DocTest` object.
+
+        `globs`, `name`, `filename`, and `lineno` are attributes for
+        the new `DocTest` object.  See the documentation for `DocTest`
+        for more information.
+        """
+        return DocTest(self.get_examples(string, name), globs,
+                       name, filename, lineno, string)
+
+    def get_examples(self, string, name='<string>'):
+        """
+        Extract all doctest examples from the given string, and return
+        them as a list of `Example` objects.  Line numbers are
+        0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as opening triple-quote,
+        and so the first interesting line is called \"line 1\" then.
+
+        The optional argument `name` is a name identifying this
+        string, and is only used for error messages.
+        """
+        # parse() alternates strings and Examples; keep only the Examples.
+        return [x for x in self.parse(string, name)
+                if isinstance(x, Example)]
+
+    def _parse_example(self, m, name, lineno):
+        """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a pair `(source, want)`, where `source` is the matched
+        example's source code (with prompts and indentation stripped);
+        and `want` is the example's expected output (with indentation
+        stripped).
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        # Get the example's indentation level.
+        indent = len(m.group('indent'))
+
+        # Divide source into lines; check that they're properly
+        # indented; and then strip their indentation & prompts.
+        source_lines = m.group('source').split('\n')
+        self._check_prompt_blank(source_lines, indent, name, lineno)
+        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+        # indent+4 skips the indentation plus the 4-char prompt '>>> '.
+        source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+        # Divide want into lines; check that it's properly indented; and
+        # then strip the indentation.  Spaces before the last newline should
+        # be preserved, so plain rstrip() isn't good enough.
+        want = m.group('want')
+        want_lines = want.split('\n')
+        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+            del want_lines[-1]  # forget final newline & spaces after it
+        self._check_prefix(want_lines, ' '*indent, name,
+                           lineno + len(source_lines))
+        want = '\n'.join([wl[indent:] for wl in want_lines])
+
+        # If `want` contains a traceback message, then extract it.
+        m = self._EXCEPTION_RE.match(want)
+        if m:
+            exc_msg = m.group('msg')
+        else:
+            exc_msg = None
+
+        # Extract options from the source.
+        options = self._find_options(source, name, lineno)
+
+        return source, options, want, exc_msg
+
+    # This regular expression looks for option directives in the
+    # source code of an example.  Option directives are comments
+    # starting with "doctest:".  Warning: this may give false
+    # positives for string-literals that contain the string
+    # "#doctest:".  Eliminating these false positives would require
+    # actually parsing the string; but we limit them by ignoring any
+    # line containing "#doctest:" that is *followed* by a quote mark.
+    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+                                      re.MULTILINE)
+
+    def _find_options(self, source, name, lineno):
+        """
+        Return a dictionary containing option overrides extracted from
+        option directives in the given source string.
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        options = {}
+        # (note: with the current regexp, this will match at most once:)
+        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+            # Directives may be separated by commas and/or whitespace.
+            option_strings = m.group(1).replace(',', ' ').split()
+            for option in option_strings:
+                # Each directive must be '+NAME' or '-NAME' with a
+                # registered flag name.
+                if (option[0] not in '+-' or
+                    option[1:] not in OPTIONFLAGS_BY_NAME):
+                    raise ValueError('line %r of the doctest for %s '
+                                     'has an invalid option: %r' %
+                                     (lineno+1, name, option))
+                flag = OPTIONFLAGS_BY_NAME[option[1:]]
+                options[flag] = (option[0] == '+')
+        # A directive on a line with no actual code is always a mistake.
+        if options and self._IS_BLANK_OR_COMMENT(source):
+            raise ValueError('line %r of the doctest for %s has an option '
+                             'directive on a line with no example: %r' %
+                             (lineno, name, source))
+        return options
+
+    # This regular expression finds the indentation of every non-blank
+    # line in a string.
+    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+    def _min_indent(self, s):
+        "Return the minimum indentation of any non-blank line in `s`"
+        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+        if len(indents) > 0:
+            return min(indents)
+        else:
+            # All-blank string: treat as unindented.
+            return 0
+
+    def _check_prompt_blank(self, lines, indent, name, lineno):
+        """
+        Given the lines of a source string (including prompts and
+        leading indentation), check to make sure that every prompt is
+        followed by a space character.  If any line is not followed by
+        a space character, then raise ValueError.
+        """
+        for i, line in enumerate(lines):
+            # line[indent:indent+3] is the 3-char prompt ('>>>' or '...');
+            # the character right after it must be a space.
+            if len(line) >= indent+4 and line[indent+3] != ' ':
+                raise ValueError('line %r of the docstring for %s '
+                                 'lacks blank after %s: %r' %
+                                 (lineno+i+1, name,
+                                  line[indent:indent+3], line))
+
+    def _check_prefix(self, lines, prefix, name, lineno):
+        """
+        Check that every line in the given list starts with the given
+        prefix; if any line does not, then raise a ValueError.
+        """
+        for i, line in enumerate(lines):
+            if line and not line.startswith(prefix):
+                raise ValueError('line %r of the docstring for %s has '
+                                 'inconsistent leading whitespace: %r' %
+                                 (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+    """
+    A class used to extract the DocTests that are relevant to a given
+    object, from its docstring and the docstrings of its contained
+    objects.  Doctests can currently be extracted from the following
+    object types: modules, functions, classes, methods, staticmethods,
+    classmethods, and properties.
+    """
+
+    def __init__(self, verbose=False, parser=DocTestParser(),
+                 recurse=True, _namefilter=None, exclude_empty=True):
+        """
+        Create a new doctest finder.
+
+        If the optional argument `verbose` is true, progress messages
+        are printed while finding tests.
+
+        The optional argument `parser` specifies a class or
+        function that should be used to create new DocTest objects (or
+        objects that implement the same interface as DocTest).  The
+        signature for this factory function should match the signature
+        of the DocTest constructor.
+
+        If the optional argument `recurse` is false, then `find` will
+        only examine the given object, and not any contained objects.
+
+        If the optional argument `exclude_empty` is false, then `find`
+        will include tests for objects with empty docstrings.
+        """
+        # NOTE: the default `parser=DocTestParser()` is a single shared
+        # instance created at class-definition time; DocTestParser is
+        # stateless here, so sharing it is harmless.
+        self._parser = parser
+        self._verbose = verbose
+        self._recurse = recurse
+        self._exclude_empty = exclude_empty
+        # _namefilter is undocumented, and exists only for temporary backward-
+        # compatibility support of testmod's deprecated isprivate mess.
+        self._namefilter = _namefilter
+
+    def find(self, obj, name=None, module=None, globs=None,
+             extraglobs=None):
+        """
+        Return a list of the DocTests that are defined by the given
+        object's docstring, or by any of its contained objects'
+        docstrings.
+
+        The optional parameter `module` is the module that contains
+        the given object.  If the module is not specified or is None, then
+        the test finder will attempt to automatically determine the
+        correct module.  The object's module is used:
+
+            - As a default namespace, if `globs` is not specified.
+            - To prevent the DocTestFinder from extracting DocTests
+              from objects that are imported from other modules.
+            - To find the name of the file containing the object.
+            - To help find the line number of the object within its
+              file.
+
+        Contained objects whose module does not match `module` are ignored.
+
+        If `module` is False, no attempt to find the module will be made.
+        This is obscure, of use mostly in tests:  if `module` is False, or
+        is None but cannot be found automatically, then all objects are
+        considered to belong to the (non-existent) module, so all contained
+        objects will (recursively) be searched for doctests.
+
+        The globals for each DocTest is formed by combining `globs`
+        and `extraglobs` (bindings in `extraglobs` override bindings
+        in `globs`).  A new copy of the globals dictionary is created
+        for each DocTest.  If `globs` is not specified, then it
+        defaults to the module's `__dict__`, if specified, or {}
+        otherwise.  If `extraglobs` is not specified, then it defaults
+        to {}.
+
+        """
+        # If name was not specified, then extract it from the object.
+        if name is None:
+            name = getattr(obj, '__name__', None)
+            if name is None:
+                raise ValueError("DocTestFinder.find: name must be given "
+                        "when obj.__name__ doesn't exist: %r" %
+                                 (type(obj),))
+
+        # Find the module that contains the given object (if obj is
+        # a module, then module=obj.).  Note: this may fail, in which
+        # case module will be None.
+        if module is False:
+            module = None
+        elif module is None:
+            module = inspect.getmodule(obj)
+
+        # Read the module's source code.  This is used by
+        # DocTestFinder._find_lineno to find the line number for a
+        # given object's docstring.
+        try:
+            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+            source_lines = linecache.getlines(file)
+            if not source_lines:
+                source_lines = None
+        except TypeError:
+            # inspect raises TypeError for built-in objects with no
+            # source file; line numbers are then simply unavailable.
+            source_lines = None
+
+        # Initialize globals, and merge in extraglobs.
+        if globs is None:
+            if module is None:
+                globs = {}
+            else:
+                globs = module.__dict__.copy()
+        else:
+            globs = globs.copy()
+        if extraglobs is not None:
+            globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.  The final
+        # dict argument is the `seen` set guarding against cycles.
+        tests = []
+        self._find(tests, obj, name, module, source_lines, globs, {})
+        # Sort the tests by alpha order of names, for consistency in
+        # verbose-mode output.  This was a feature of doctest in Pythons
+        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
+        # 2.4.4 and 2.5.
+        tests.sort()
+        return tests
+
+    def _filter(self, obj, prefix, base):
+        """
+        Return true if the given object should not be examined.
+        """
+        # Only filters when the deprecated _namefilter hook was supplied.
+        return (self._namefilter is not None and
+                self._namefilter(prefix, base))
+
+    def _from_module(self, module, object):
+        """
+        Return true if the given object is defined in the given
+        module.
+        """
+        if module is None:
+            # No module to compare against: accept everything.
+            return True
+        elif inspect.isfunction(object):
+            # A function's globals dict identifies its defining module.
+            return module.__dict__ is object.func_globals
+        elif inspect.isclass(object):
+            # Some jython classes don't set __module__
+            return module.__name__ == getattr(object, '__module__', None)
+        elif inspect.getmodule(object) is not None:
+            return module is inspect.getmodule(object)
+        elif hasattr(object, '__module__'):
+            return module.__name__ == object.__module__
+        elif isinstance(object, property):
+            return True # [XX] no way not be sure.
+        else:
+            raise ValueError("object must be a class or function")
+
+    def _find(self, tests, obj, name, module, source_lines, globs, seen):
+        """
+        Find tests for the given object and any contained objects, and
+        add them to `tests`.
+
+        `seen` maps id(obj) -> 1 for every object already visited, so
+        cyclic or repeated references are processed only once.
+        """
+        if self._verbose:
+            print 'Finding tests in %s' % name
+
+        # If we've already processed this object, then ignore it.
+        if id(obj) in seen:
+            return
+        seen[id(obj)] = 1
+
+        # Find a test for this object, and add it to the list of tests.
+        test = self._get_test(obj, name, module, globs, source_lines)
+        if test is not None:
+            tests.append(test)
+
+        # Look for tests in a module's contained objects.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                valname = '%s.%s' % (name, valname)
+                # Recurse to functions & classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val)) and
+                    self._from_module(module, val)):
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+        # Look for tests in a module's __test__ dictionary.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in getattr(obj, '__test__', {}).items():
+                if not isinstance(valname, basestring):
+                    raise ValueError("DocTestFinder.find: __test__ keys "
+                                     "must be strings: %r" %
+                                     (type(valname),))
+                if not (inspect.isfunction(val) or inspect.isclass(val) or
+                        inspect.ismethod(val) or inspect.ismodule(val) or
+                        isinstance(val, basestring)):
+                    raise ValueError("DocTestFinder.find: __test__ values "
+                                     "must be strings, functions, methods, "
+                                     "classes, or modules: %r" %
+                                     (type(val),))
+                valname = '%s.__test__.%s' % (name, valname)
+                self._find(tests, val, valname, module, source_lines,
+                           globs, seen)
+
+        # Look for tests in a class's contained objects.
+        if inspect.isclass(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                # Special handling for staticmethod/classmethod: unwrap
+                # them to get at the underlying function.
+                if isinstance(val, staticmethod):
+                    val = getattr(obj, valname)
+                if isinstance(val, classmethod):
+                    # im_func is the py2 attribute for a bound method's
+                    # underlying function.
+                    val = getattr(obj, valname).im_func
+
+                # Recurse to methods, properties, and nested classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val) or
+                      isinstance(val, property)) and
+                      self._from_module(module, val)):
+                    valname = '%s.%s' % (name, valname)
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+    def _get_test(self, obj, name, module, globs, source_lines):
+        """
+        Return a DocTest for the given object, if it defines a docstring;
+        otherwise, return None.
+
+        `obj` may also be a bare string (as found in `__test__` dicts),
+        in which case the string itself is used as the docstring.
+        """
+        # Extract the object's docstring.  If it doesn't have one,
+        # then return None (no test for this object).
+        if isinstance(obj, basestring):
+            docstring = obj
+        else:
+            try:
+                if obj.__doc__ is None:
+                    docstring = ''
+                else:
+                    docstring = obj.__doc__
+                    # __doc__ may be a non-string object; coerce it so
+                    # the parser always receives a string.
+                    if not isinstance(docstring, basestring):
+                        docstring = str(docstring)
+            except (TypeError, AttributeError):
+                docstring = ''
+
+        # Find the docstring's location in the file.
+        lineno = self._find_lineno(obj, source_lines)
+
+        # Don't bother if the docstring is empty.
+        if self._exclude_empty and not docstring:
+            return None
+
+        # Return a DocTest for this object.
+        if module is None:
+            filename = None
+        else:
+            filename = getattr(module, '__file__', module.__name__)
+            # Map compiled filenames back to source files: strip the
+            # trailing 'c'/'o' of .pyc/.pyo, and translate Jython's
+            # '$py.class' suffix back to '.py'.
+            if filename[-4:] in (".pyc", ".pyo"):
+                filename = filename[:-1]
+            elif sys.platform.startswith('java') and \
+                    filename.endswith('$py.class'):
+                filename = '%s.py' % filename[:-9]
+        return self._parser.get_doctest(docstring, globs, name,
+                                        filename, lineno)
+
+    def _find_lineno(self, obj, source_lines):
+        """
+        Return a line number of the given object's docstring.  Note:
+        this method assumes that the object has a docstring.
+
+        Returns a 0-based index into `source_lines`, or None if the
+        line number could not be determined.
+        """
+        lineno = None
+
+        # Find the line number for modules.
+        if inspect.ismodule(obj):
+            lineno = 0
+
+        # Find the line number for classes.
+        # Note: this could be fooled if a class is defined multiple
+        # times in a single file.
+        if inspect.isclass(obj):
+            if source_lines is None:
+                return None
+            pat = re.compile(r'^\s*class\s*%s\b' %
+                             getattr(obj, '__name__', '-'))
+            for i, line in enumerate(source_lines):
+                if pat.match(line):
+                    lineno = i
+                    break
+
+        # Find the line number for functions & methods.
+        # Unwrap each object kind down to the underlying code object,
+        # whose co_firstlineno gives the definition line (1-based, so
+        # subtract 1).  Code objects always carry co_firstlineno, so
+        # the getattr default is never actually used here.
+        if inspect.ismethod(obj): obj = obj.im_func
+        if inspect.isfunction(obj): obj = obj.func_code
+        if inspect.istraceback(obj): obj = obj.tb_frame
+        if inspect.isframe(obj): obj = obj.f_code
+        if inspect.iscode(obj):
+            lineno = getattr(obj, 'co_firstlineno', None)-1
+
+        # Find the line number where the docstring starts.  Assume
+        # that it's the first line that begins with a quote mark.
+        # Note: this could be fooled by a multiline function
+        # signature, where a continuation line begins with a quote
+        # mark.
+        if lineno is not None:
+            if source_lines is None:
+                return lineno+1
+            pat = re.compile('(^|.*:)\s*\w*("|\')')
+            for lineno in range(lineno, len(source_lines)):
+                if pat.match(source_lines[lineno]):
+                    return lineno
+
+        # We couldn't find the line number.
+        return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+    """
+    A class used to run DocTest test cases, and accumulate statistics.
+    The `run` method is used to process a single DocTest case; the
+    `summarize` method prints a summary of all the cases that have been
+    run by this runner.  Statistics are kept in `tries`, `failures`,
+    and the per-test mapping `_name2ft`.
+    """
+    # This divider string is used to separate failure messages, and to
+    # separate sections of the summary.
+    DIVIDER = "*" * 70
+
+    def __init__(self, checker=None, verbose=None, optionflags=0):
+        """
+        Create a new test runner.
+
+        Optional keyword arg `checker` is the `OutputChecker` that
+        should be used to compare the expected outputs and actual
+        outputs of doctest examples.
+
+        Optional keyword arg 'verbose' prints lots of stuff if true,
+        only failures if false; by default, it's true iff '-v' is in
+        sys.argv.
+
+        Optional argument `optionflags` can be used to control how the
+        test runner compares expected output to actual output, and how
+        it displays failures.  See the documentation for `testmod` for
+        more information.
+        """
+        self._checker = checker or OutputChecker()
+        if verbose is None:
+            verbose = '-v' in sys.argv
+        self._verbose = verbose
+        self.optionflags = optionflags
+        self.original_optionflags = optionflags
+
+        # Keep track of the examples we've run.
+        self.tries = 0
+        self.failures = 0
+        self._name2ft = {}
+
+        # Create a fake output target for capturing doctest output.
+        self._fakeout = _SpoofOut()
+
+    #/////////////////////////////////////////////////////////////////
+    # Reporting methods
+    #/////////////////////////////////////////////////////////////////
+
+    def report_start(self, out, test, example):
+        """
+        Report that the test runner is about to process the given
+        example.  (Only displays a message if verbose=True)
+        """
+        if self._verbose:
+            if example.want:
+                out('Trying:\n' + _indent(example.source) +
+                    'Expecting:\n' + _indent(example.want))
+            else:
+                out('Trying:\n' + _indent(example.source) +
+                    'Expecting nothing\n')
+
+    def report_success(self, out, test, example, got):
+        """
+        Report that the given example ran successfully.  (Only
+        displays a message if verbose=True)
+        """
+        if self._verbose:
+            out("ok\n")
+
+    def report_failure(self, out, test, example, got):
+        """
+        Report that the given example failed.
+        """
+        out(self._failure_header(test, example) +
+            self._checker.output_difference(example, got, self.optionflags))
+
+    def report_unexpected_exception(self, out, test, example, exc_info):
+        """
+        Report that the given example raised an unexpected exception.
+        """
+        out(self._failure_header(test, example) +
+            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+    def _failure_header(self, test, example):
+        # Build the standard "File ..., line ..., in ..." header used
+        # by the failure reports above.
+        out = [self.DIVIDER]
+        if test.filename:
+            if test.lineno is not None and example.lineno is not None:
+                lineno = test.lineno + example.lineno + 1
+            else:
+                lineno = '?'
+            out.append('File "%s", line %s, in %s' %
+                       (test.filename, lineno, test.name))
+        else:
+            out.append('Line %s, in %s' % (example.lineno+1, test.name))
+        out.append('Failed example:')
+        source = example.source
+        out.append(_indent(source))
+        return '\n'.join(out)
+
+    #/////////////////////////////////////////////////////////////////
+    # DocTest Running
+    #/////////////////////////////////////////////////////////////////
+
+    def __run(self, test, compileflags, out):
+        """
+        Run the examples in `test`.  Write the outcome of each example
+        with one of the `DocTestRunner.report_*` methods, using the
+        writer function `out`.  `compileflags` is the set of compiler
+        flags that should be used to execute examples.  Return a tuple
+        `(f, t)`, where `t` is the number of examples tried, and `f`
+        is the number of examples that failed.  The examples are run
+        in the namespace `test.globs`.
+        """
+        # Keep track of the number of failures and tries.
+        failures = tries = 0
+
+        # Save the option flags (since option directives can be used
+        # to modify them).
+        original_optionflags = self.optionflags
+
+        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+        check = self._checker.check_output
+
+        # Process each example.
+        for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+            # reporting after the first failure.
+            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+                     failures > 0)
+
+            # Merge in the example's options.
+            self.optionflags = original_optionflags
+            if example.options:
+                for (optionflag, val) in example.options.items():
+                    if val:
+                        self.optionflags |= optionflag
+                    else:
+                        self.optionflags &= ~optionflag
+
+            # Record that we started this example.
+            tries += 1
+            if not quiet:
+                self.report_start(out, test, example)
+
+            # Use a special filename for compile(), so we can retrieve
+            # the source code during interactive debugging (see
+            # __patched_linecache_getlines).
+            filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+            # Run the example in the given context (globs), and record
+            # any exception that gets raised.  (But don't intercept
+            # keyboard interrupts.)
+            try:
+                # Don't blink!  This is where the user's code gets run.
+                exec compile(example.source, filename, "single",
+                             compileflags, 1) in test.globs
+                self.debugger.set_continue() # ==== Example Finished ====
+                exception = None
+            except KeyboardInterrupt:
+                raise
+            except:
+                exception = sys.exc_info()
+                self.debugger.set_continue() # ==== Example Finished ====
+
+            got = self._fakeout.getvalue()  # the actual output
+            self._fakeout.truncate(0)
+            outcome = FAILURE   # guilty until proved innocent or insane
+
+            # If the example executed without raising any exceptions,
+            # verify its output.
+            if exception is None:
+                if check(example.want, got, self.optionflags):
+                    outcome = SUCCESS
+
+            # The example raised an exception:  check if it was expected.
+            else:
+                exc_info = sys.exc_info()
+                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+                if not quiet:
+                    got += _exception_traceback(exc_info)
+
+                # If `example.exc_msg` is None, then we weren't expecting
+                # an exception.
+                if example.exc_msg is None:
+                    outcome = BOOM
+
+                # We expected an exception:  see whether it matches.
+                elif check(example.exc_msg, exc_msg, self.optionflags):
+                    outcome = SUCCESS
+
+                # Another chance if they didn't care about the detail.
+                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+                    m1 = re.match(r'[^:]*:', example.exc_msg)
+                    m2 = re.match(r'[^:]*:', exc_msg)
+                    if m1 and m2 and check(m1.group(0), m2.group(0),
+                                           self.optionflags):
+                        outcome = SUCCESS
+
+            # Report the outcome.
+            if outcome is SUCCESS:
+                if not quiet:
+                    self.report_success(out, test, example, got)
+            elif outcome is FAILURE:
+                if not quiet:
+                    self.report_failure(out, test, example, got)
+                failures += 1
+            elif outcome is BOOM:
+                if not quiet:
+                    self.report_unexpected_exception(out, test, example,
+                                                     exc_info)
+                failures += 1
+            else:
+                assert False, ("unknown outcome", outcome)
+
+        # Restore the option flags (in case they were modified)
+        self.optionflags = original_optionflags
+
+        # Record and return the number of failures and tries.
+        self.__record_outcome(test, failures, tries)
+        return failures, tries
+
+    def __record_outcome(self, test, f, t):
+        """
+        Record the fact that the given DocTest (`test`) generated `f`
+        failures out of `t` tried examples.
+        """
+        f2, t2 = self._name2ft.get(test.name, (0,0))
+        self._name2ft[test.name] = (f+f2, t+t2)
+        self.failures += f
+        self.tries += t
+
+    # Matches the synthetic filenames produced by __run() for compiled
+    # examples, e.g. '<doctest some.name[3]>'.
+    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+                                         r'(?P<name>[\w\.]+)'
+                                         r'\[(?P<examplenum>\d+)\]>$')
+    def __patched_linecache_getlines(self, filename):
+        # Serve example source for the synthetic '<doctest ...>'
+        # filenames of the test currently being run; defer to the real
+        # linecache implementation for everything else.
+        m = self.__LINECACHE_FILENAME_RE.match(filename)
+        if m and m.group('name') == self.test.name:
+            example = self.test.examples[int(m.group('examplenum'))]
+            return example.source.splitlines(True)
+        else:
+            return self.save_linecache_getlines(filename)
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        """
+        Run the examples in `test`, and display the results using the
+        writer function `out`.
+
+        The examples are run in the namespace `test.globs`.  If
+        `clear_globs` is true (the default), then this namespace will
+        be cleared after the test runs, to help with garbage
+        collection.  If you would like to examine the namespace after
+        the test completes, then use `clear_globs=False`.
+
+        `compileflags` gives the set of flags that should be used by
+        the Python compiler when running the examples.  If not
+        specified, then it will default to the set of future-import
+        flags that apply to `globs`.
+
+        The output of each example is checked using
+        `DocTestRunner.check_output`, and the results are formatted by
+        the `DocTestRunner.report_*` methods.
+        """
+        self.test = test
+
+        if compileflags is None:
+            compileflags = _extract_future_flags(test.globs)
+
+        save_stdout = sys.stdout
+        if out is None:
+            out = save_stdout.write
+        sys.stdout = self._fakeout
+
+        # Patch pdb.set_trace to restore sys.stdout during interactive
+        # debugging (so it's not still redirected to self._fakeout).
+        # Note that the interactive output will go to *our*
+        # save_stdout, even if that's not the real sys.stdout; this
+        # allows us to write test cases for the set_trace behavior.
+        save_set_trace = pdb.set_trace
+        self.debugger = _OutputRedirectingPdb(save_stdout)
+        self.debugger.reset()
+        pdb.set_trace = self.debugger.set_trace
+
+        # Patch linecache.getlines, so we can see the example's source
+        # when we're inside the debugger.
+        self.save_linecache_getlines = linecache.getlines
+        linecache.getlines = self.__patched_linecache_getlines
+
+        try:
+            return self.__run(test, compileflags, out)
+        finally:
+            # Always undo the global monkey-patches, even if the test
+            # raised.
+            sys.stdout = save_stdout
+            pdb.set_trace = save_set_trace
+            linecache.getlines = self.save_linecache_getlines
+            if clear_globs:
+                test.globs.clear()
+
+    #/////////////////////////////////////////////////////////////////
+    # Summarization
+    #/////////////////////////////////////////////////////////////////
+    def summarize(self, verbose=None):
+        """
+        Print a summary of all the test cases that have been run by
+        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+        the total number of failed examples, and `t` is the total
+        number of tried examples.
+
+        The optional `verbose` argument controls how detailed the
+        summary is.  If the verbosity is not specified, then the
+        DocTestRunner's verbosity is used.
+        """
+        if verbose is None:
+            verbose = self._verbose
+        notests = []
+        passed = []
+        failed = []
+        totalt = totalf = 0
+        for x in self._name2ft.items():
+            name, (f, t) = x
+            assert f <= t
+            totalt += t
+            totalf += f
+            if t == 0:
+                notests.append(name)
+            elif f == 0:
+                passed.append( (name, t) )
+            else:
+                failed.append(x)
+        if verbose:
+            if notests:
+                print len(notests), "items had no tests:"
+                notests.sort()
+                for thing in notests:
+                    print "   ", thing
+            if passed:
+                print len(passed), "items passed all tests:"
+                passed.sort()
+                for thing, count in passed:
+                    print " %3d tests in %s" % (count, thing)
+        if failed:
+            print self.DIVIDER
+            print len(failed), "items had failures:"
+            failed.sort()
+            for thing, (f, t) in failed:
+                print " %3d of %3d in %s" % (f, t, thing)
+        if verbose:
+            print totalt, "tests in", len(self._name2ft), "items."
+            print totalt - totalf, "passed and", totalf, "failed."
+        if totalf:
+            print "***Test Failed***", totalf, "failures."
+        elif verbose:
+            print "Test passed."
+        return totalf, totalt
+
+    #/////////////////////////////////////////////////////////////////
+    # Backward compatibility cruft to maintain doctest.master.
+    #/////////////////////////////////////////////////////////////////
+    def merge(self, other):
+        # Fold another runner's per-test (failures, tries) counts into
+        # this one's, summing counts for tests present in both.
+        d = self._name2ft
+        for name, (f, t) in other._name2ft.items():
+            if name in d:
+                print "*** DocTestRunner.merge: '" + name + "' in both" \
+                    " testers; summing outcomes."
+                f2, t2 = d[name]
+                f = f + f2
+                t = t + t2
+            d[name] = f, t
+
+class OutputChecker:
+    """
+    A class used to check whether the actual output from a doctest
+    example matches the expected output.  `OutputChecker` defines two
+    methods: `check_output`, which compares a given pair of outputs,
+    and returns true if they match; and `output_difference`, which
+    returns a string describing the differences between two outputs.
+    """
+    def check_output(self, want, got, optionflags):
+        """
+        Return True iff the actual output from an example (`got`)
+        matches the expected output (`want`).  These strings are
+        always considered to match if they are identical; but
+        depending on what option flags the test runner is using,
+        several non-exact match types are also possible.  See the
+        documentation for `TestRunner` for more information about
+        option flags.
+        """
+        # Handle the common case first, for efficiency:
+        # if they're string-identical, always return true.
+        if got == want:
+            return True
+
+        # The values True and False replaced 1 and 0 as the return
+        # value for boolean comparisons in Python 2.3.
+        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+            if (got,want) == ("True\n", "1\n"):
+                return True
+            if (got,want) == ("False\n", "0\n"):
+                return True
+
+        # <BLANKLINE> can be used as a special sequence to signify a
+        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            # Replace <BLANKLINE> in want with a blank line.
+            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+                          '', want)
+            # If a line in got contains only spaces, then remove the
+            # spaces.
+            got = re.sub('(?m)^\s*?$', '', got)
+            if got == want:
+                return True
+
+        # This flag causes doctest to ignore any differences in the
+        # contents of whitespace strings.  Note that this can be used
+        # in conjunction with the ELLIPSIS flag.
+        if optionflags & NORMALIZE_WHITESPACE:
+            got = ' '.join(got.split())
+            want = ' '.join(want.split())
+            if got == want:
+                return True
+
+        # The ELLIPSIS flag says to let the sequence "..." in `want`
+        # match any substring in `got`.
+        if optionflags & ELLIPSIS:
+            if _ellipsis_match(want, got):
+                return True
+
+        # We didn't find any match; return false.
+        return False
+
+    # Should we do a fancy diff?
+    def _do_a_fancy_diff(self, want, got, optionflags):
+        # Not unless they asked for a fancy diff.
+        if not optionflags & (REPORT_UDIFF |
+                              REPORT_CDIFF |
+                              REPORT_NDIFF):
+            return False
+
+        # If expected output uses ellipsis, a meaningful fancy diff is
+        # too hard ... or maybe not.  In two real-life failures Tim saw,
+        # a diff was a major help anyway, so this is commented out.
+        # [todo] _ellipsis_match() knows which pieces do and don't match,
+        # and could be the basis for a kick-ass diff in this case.
+        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+        ##    return False
+
+        # ndiff does intraline difference marking, so can be useful even
+        # for 1-line differences.
+        if optionflags & REPORT_NDIFF:
+            return True
+
+        # The other diff types need at least a few lines to be helpful.
+        return want.count('\n') > 2 and got.count('\n') > 2
+
+    def output_difference(self, example, got, optionflags):
+        """
+        Return a string describing the differences between the
+        expected output for a given example (`example`) and the actual
+        output (`got`).  `optionflags` is the set of option flags used
+        to compare `want` and `got`.
+        """
+        want = example.want
+        # If <BLANKLINE>s are being used, then replace blank lines
+        # with <BLANKLINE> in the actual output string.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+        # Check if we should use diff.
+        if self._do_a_fancy_diff(want, got, optionflags):
+            # Split want & got into lines.
+            want_lines = want.splitlines(True)  # True == keep line ends
+            got_lines = got.splitlines(True)
+            # Use difflib to find their differences.
+            if optionflags & REPORT_UDIFF:
+                diff = difflib.unified_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:] # strip the diff header
+                kind = 'unified diff with -expected +actual'
+            elif optionflags & REPORT_CDIFF:
+                diff = difflib.context_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:] # strip the diff header
+                kind = 'context diff with expected followed by actual'
+            elif optionflags & REPORT_NDIFF:
+                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+                diff = list(engine.compare(want_lines, got_lines))
+                kind = 'ndiff with -expected +actual'
+            else:
+                assert 0, 'Bad diff option'
+            # Remove trailing whitespace on diff output.
+            diff = [line.rstrip() + '\n' for line in diff]
+            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+        # If we're not using diff, then simply list the expected
+        # output followed by the actual output.
+        if want and got:
+            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+        elif want:
+            return 'Expected:\n%sGot nothing\n' % _indent(want)
+        elif got:
+            return 'Expected nothing\nGot:\n%s' % _indent(got)
+        else:
+            return 'Expected nothing\nGot nothing\n'
+
+class DocTestFailure(Exception):
+    """A DocTest example has failed in debugging mode.
+
+    The exception instance has variables:
+
+    - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+    - got: the actual output
+    """
+    def __init__(self, test, example, got):
+        self.test = test
+        self.example = example
+        self.got = got
+
+    def __str__(self):
+        return str(self.test)
+
+class UnexpectedException(Exception):
+    """A DocTest example has encountered an unexpected exception
+
+    The exception instance has variables:
+
+    - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+    - exc_info: the exception info
+    """
+    def __init__(self, test, example, exc_info):
+        self.test = test
+        self.example = example
+        self.exc_info = exc_info
+
+    def __str__(self):
+        return str(self.test)
+
+class DebugRunner(DocTestRunner):
+    """A DocTestRunner that raises on the first failing example.
+
+    `report_failure` raises `DocTestFailure` and
+    `report_unexpected_exception` raises `UnexpectedException`, so the
+    first problem stops the run and can be post-mortem debugged.
+    """
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        # Run with clear_globs=False so the namespace survives a raised
+        # DocTestFailure/UnexpectedException for debugging; clear it
+        # here only on a normal (non-raising) return.
+        r = DocTestRunner.run(self, test, compileflags, out, False)
+        if clear_globs:
+            test.globs.clear()
+        return r
+
+    def report_unexpected_exception(self, out, test, example, exc_info):
+        raise UnexpectedException(test, example, exc_info)
+
+    def report_failure(self, out, test, example, got):
+        raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod (merged via DocTestRunner.merge).
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+            report=True, optionflags=0, extraglobs=None,
+            raise_on_error=False, exclude_empty=False):
+    """m=None, name=None, globs=None, verbose=None, isprivate=None,
+       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+       exclude_empty=False
+
+    Test examples in docstrings in functions and classes reachable
+    from module m (or the current module if m is not supplied), starting
+    with m.__doc__.  Unless isprivate is specified, private names
+    are not skipped.
+
+    Also test examples reachable from dict m.__test__ if it exists and is
+    not None.  m.__test__ maps names to functions, classes and strings;
+    function and class docstrings are tested even if the name is private;
+    strings are tested directly, as if they were docstrings.
+
+    Return (#failures, #tests).
+
+    See doctest.__doc__ for an overview.
+
+    Optional keyword arg "name" gives the name of the module; by default
+    use m.__name__.
+
+    Optional keyword arg "globs" gives a dict to be used as the globals
+    when executing examples; by default, use m.__dict__.  A copy of this
+    dict is actually used for each docstring, so that each docstring's
+    examples start with a clean slate.
+
+    Optional keyword arg "extraglobs" gives a dictionary that should be
+    merged into the globals that are used to execute examples.  By
+    default, no extra globals are used.  This is new in 2.4.
+
+    Optional keyword arg "verbose" prints lots of stuff if true, prints
+    only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+    Optional keyword arg "report" prints a summary at the end when true,
+    else prints nothing at the end.  In verbose mode, the summary is
+    detailed, else very brief (in fact, empty if all tests passed).
+
+    Optional keyword arg "optionflags" or's together module constants,
+    and defaults to 0.  This is new in 2.3.  Possible values (see the
+    docs for details):
+
+        DONT_ACCEPT_TRUE_FOR_1
+        DONT_ACCEPT_BLANKLINE
+        NORMALIZE_WHITESPACE
+        ELLIPSIS
+        IGNORE_EXCEPTION_DETAIL
+        REPORT_UDIFF
+        REPORT_CDIFF
+        REPORT_NDIFF
+        REPORT_ONLY_FIRST_FAILURE
+
+    Optional keyword arg "raise_on_error" raises an exception on the
+    first unexpected exception or failure. This allows failures to be
+    post-mortem debugged.
+
+    Deprecated in Python 2.4:
+    Optional keyword arg "isprivate" specifies a function used to
+    determine whether a name is private.  The default function is
+    to treat all functions as public.  Optionally, "isprivate" can be
+    set to doctest.is_private to skip over functions marked as private
+    using the underscore naming convention; see its docs for details.
+
+    Advanced tomfoolery:  testmod runs methods of a local instance of
+    class doctest.Tester, then merges the results into (or creates)
+    global Tester instance doctest.master.  Methods of doctest.master
+    can be called directly too, if you want to do something unusual.
+    Passing report=0 to testmod is especially useful then, to delay
+    displaying a summary.  Invoke doctest.master.summarize(verbose)
+    when you're done fiddling.
+    """
+    global master
+
+    if isprivate is not None:
+        warnings.warn("the isprivate argument is deprecated; "
+                      "examine DocTestFinder.find() lists instead",
+                      DeprecationWarning)
+
+    # If no module was given, then use __main__.
+    if m is None:
+        # DWA - m will still be None if this wasn't invoked from the command
+        # line, in which case the following TypeError is about as good an error
+        # as we should expect
+        m = sys.modules.get('__main__')
+
+    # Check that we were actually given a module.
+    if not inspect.ismodule(m):
+        raise TypeError("testmod: module required; %r" % (m,))
+
+    # If no name was given, then use the module's name.
+    if name is None:
+        name = m.__name__
+
+    # Find, parse, and run all tests in the given module.
+    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+    # raise_on_error selects the runner that raises on first failure.
+    if raise_on_error:
+        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+    else:
+        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+        runner.run(test)
+
+    if report:
+        runner.summarize()
+
+    # Maintain the backward-compatible global doctest.master runner.
+    if master is None:
+        master = runner
+    else:
+        master.merge(runner)
+
+    return runner.failures, runner.tries
+
+def testfile(filename, module_relative=True, name=None, package=None,
+             globs=None, verbose=None, report=True, optionflags=0,
+             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+    """
+    Test examples in the given file.  Return (#failures, #tests).
+
+    Optional keyword arg "module_relative" specifies how filenames
+    should be interpreted:
+
+      - If "module_relative" is True (the default), then "filename"
+         specifies a module-relative path.  By default, this path is
+         relative to the calling module's directory; but if the
+         "package" argument is specified, then it is relative to that
+         package.  To ensure os-independence, "filename" should use
+         "/" characters to separate path segments, and should not
+         be an absolute path (i.e., it may not begin with "/").
+
+      - If "module_relative" is False, then "filename" specifies an
+        os-specific path.  The path may be absolute or relative (to
+        the current working directory).
+
+    Optional keyword arg "name" gives the name of the test; by default
+    use the file's basename.
+
+    Optional keyword argument "package" is a Python package or the
+    name of a Python package whose directory should be used as the
+    base directory for a module relative filename.  If no package is
+    specified, then the calling module's directory is used as the base
+    directory for module relative filenames.  It is an error to
+    specify "package" if "module_relative" is False.
+
+    Optional keyword arg "globs" gives a dict to be used as the globals
+    when executing examples; by default, use {}.  A copy of this dict
+    is actually used for each docstring, so that each docstring's
+    examples start with a clean slate.
+
+    Optional keyword arg "extraglobs" gives a dictionary that should be
+    merged into the globals that are used to execute examples.  By
+    default, no extra globals are used.
+
+    Optional keyword arg "verbose" prints lots of stuff if true, prints
+    only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+    Optional keyword arg "report" prints a summary at the end when true,
+    else prints nothing at the end.  In verbose mode, the summary is
+    detailed, else very brief (in fact, empty if all tests passed).
+
+    Optional keyword arg "optionflags" or's together module constants,
+    and defaults to 0.  Possible values (see the docs for details):
+
+        DONT_ACCEPT_TRUE_FOR_1
+        DONT_ACCEPT_BLANKLINE
+        NORMALIZE_WHITESPACE
+        ELLIPSIS
+        IGNORE_EXCEPTION_DETAIL
+        REPORT_UDIFF
+        REPORT_CDIFF
+        REPORT_NDIFF
+        REPORT_ONLY_FIRST_FAILURE
+
+    Optional keyword arg "raise_on_error" raises an exception on the
+    first unexpected exception or failure. This allows failures to be
+    post-mortem debugged.
+
+    Optional keyword arg "parser" specifies a DocTestParser (or
+    subclass) that should be used to extract tests from the files.
+
+    Advanced tomfoolery:  testmod runs methods of a local instance of
+    class doctest.Tester, then merges the results into (or creates)
+    global Tester instance doctest.master.  Methods of doctest.master
+    can be called directly too, if you want to do something unusual.
+    Passing report=0 to testmod is especially useful then, to delay
+    displaying a summary.  Invoke doctest.master.summarize(verbose)
+    when you're done fiddling.
+    """
+    global master
+
+    if package and not module_relative:
+        raise ValueError("Package may only be specified for module-"
+                         "relative paths.")
+
+    # Relativize the path
+    if module_relative:
+        package = _normalize_module(package)
+        filename = _module_relative_path(package, filename)
+
+    # If no name was given, then use the file's name.
+    if name is None:
+        name = os.path.basename(filename)
+
+    # Assemble the globals.
+    if globs is None:
+        globs = {}
+    else:
+        globs = globs.copy()
+    if extraglobs is not None:
+        globs.update(extraglobs)
+
+    # DebugRunner aborts on the first failure/unexpected exception;
+    # DocTestRunner collects all failures and reports them at the end.
+    if raise_on_error:
+        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+    else:
+        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+    # Read the file, convert it to a test, and run it.
+    # NOTE(review): the file object is never explicitly closed; this
+    # relies on refcounting to release the handle -- confirm acceptable.
+    s = open(filename).read()
+    test = parser.get_doctest(s, globs, name, filename, 0)
+    runner.run(test)
+
+    if report:
+        runner.summarize()
+
+    # Fold results into the module-global "master" runner so a later
+    # doctest.master.summarize() call covers every run in this process.
+    if master is None:
+        master = runner
+    else:
+        master.merge(runner)
+
+    return runner.failures, runner.tries
+
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+                           compileflags=None, optionflags=0):
+    """
+    Test examples in the given object's docstring (`f`), using `globs`
+    as globals.  Optional argument `name` is used in failure messages.
+    If the optional argument `verbose` is true, then generate output
+    even if there are no failures.
+
+    `compileflags` gives the set of flags that should be used by the
+    Python compiler when running the examples.  If not specified, then
+    it will default to the set of future-import flags that apply to
+    `globs`.
+
+    Optional keyword arg `optionflags` specifies options for the
+    testing and output.  See the documentation for `testmod` for more
+    information.
+
+    Note that this function returns nothing and prints no summary;
+    failures are reported only through the runner's normal output.
+    """
+    # Find, parse, and run all tests in the given module.
+    # recurse=False: only f's own docstring is searched, not contained
+    # objects.
+    finder = DocTestFinder(verbose=verbose, recurse=False)
+    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+    for test in finder.find(f, name, globs=globs):
+        runner.run(test, compileflags=compileflags)
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility.  It's not
+# actually used in any way.
+
+class Tester:
+    """Deprecated pre-2.4 doctest interface.
+
+    Kept only for backwards compatibility (see the section comment
+    above); instantiating it issues a DeprecationWarning.  Use
+    DocTestRunner / DocTestFinder directly instead.
+    """
+    def __init__(self, mod=None, globs=None, verbose=None,
+                 isprivate=None, optionflags=0):
+
+        warnings.warn("class Tester is deprecated; "
+                      "use class doctest.DocTestRunner instead",
+                      DeprecationWarning, stacklevel=2)
+        if mod is None and globs is None:
+            raise TypeError("Tester.__init__: must specify mod or globs")
+        if mod is not None and not inspect.ismodule(mod):
+            raise TypeError("Tester.__init__: mod must be a module; %r" %
+                            (mod,))
+        # When no explicit globals are given, run examples in the
+        # module's own namespace.
+        if globs is None:
+            globs = mod.__dict__
+        self.globs = globs
+
+        self.verbose = verbose
+        self.isprivate = isprivate
+        self.optionflags = optionflags
+        self.testfinder = DocTestFinder(_namefilter=isprivate)
+        self.testrunner = DocTestRunner(verbose=verbose,
+                                        optionflags=optionflags)
+
+    def runstring(self, s, name):
+        # Parse and run doctest examples contained in the string `s`.
+        # Returns (#failures, #tries).
+        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+        if self.verbose:
+            print "Running string", name
+        (f,t) = self.testrunner.run(test)
+        if self.verbose:
+            print f, "of", t, "examples failed in string", name
+        return (f,t)
+
+    def rundoc(self, object, name=None, module=None):
+        # Run all doctests found on `object`, accumulating failure and
+        # try counts across every discovered test.
+        f = t = 0
+        tests = self.testfinder.find(object, name, module=module,
+                                     globs=self.globs)
+        for test in tests:
+            (f2, t2) = self.testrunner.run(test)
+            (f,t) = (f+f2, t+t2)
+        return (f,t)
+
+    def rundict(self, d, name, module=None):
+        # Wrap the dict in a synthetic module so the finder can walk it.
+        import new
+        m = new.module(name)
+        m.__dict__.update(d)
+        if module is None:
+            module = False
+        return self.rundoc(m, name, module)
+
+    def run__test__(self, d, name):
+        # Run a __test__-style mapping of names to tests.
+        import new
+        m = new.module(name)
+        m.__test__ = d
+        return self.rundoc(m, name)
+
+    def summarize(self, verbose=None):
+        return self.testrunner.summarize(verbose)
+
+    def merge(self, other):
+        self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+    """Set the default doctest reporting flags used by DocTestCase.
+
+    Only bits in REPORTING_FLAGS are accepted; any other bit raises
+    ValueError.  Returns the previous flag value.  These defaults apply
+    only to tests whose own optionflags specify no reporting options
+    (see DocTestCase.runTest).
+    """
+    global _unittest_reportflags
+
+    if (flags & REPORTING_FLAGS) != flags:
+        raise ValueError("Only reporting flags allowed", flags)
+    old = _unittest_reportflags
+    _unittest_reportflags = flags
+    return old
+
+
+class DocTestCase(unittest.TestCase):
+    """unittest.TestCase wrapper around a single DocTest object.
+
+    Runs the doctest with a DocTestRunner and converts any failures
+    into a unittest failureException carrying the runner's output.
+    """
+
+    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+                 checker=None):
+
+        unittest.TestCase.__init__(self)
+        # All doctest-specific state is kept under a _dt_ prefix to
+        # avoid clashing with unittest.TestCase attributes.
+        self._dt_optionflags = optionflags
+        self._dt_checker = checker
+        self._dt_test = test
+        self._dt_setUp = setUp
+        self._dt_tearDown = tearDown
+
+    def setUp(self):
+        # Optional user-supplied setUp hook receives the DocTest object.
+        test = self._dt_test
+
+        if self._dt_setUp is not None:
+            self._dt_setUp(test)
+
+    def tearDown(self):
+        test = self._dt_test
+
+        if self._dt_tearDown is not None:
+            self._dt_tearDown(test)
+
+        # Free anything the examples left in the test's globals.
+        test.globs.clear()
+
+    def runTest(self):
+        test = self._dt_test
+        # NOTE(review): sys.stdout is saved here and restored in the
+        # finally clause, but is never rebound to `new`; runner output is
+        # captured via out=new.write instead -- confirm intentional.
+        old = sys.stdout
+        new = StringIO()
+        optionflags = self._dt_optionflags
+
+        if not (optionflags & REPORTING_FLAGS):
+            # The option flags don't include any reporting flags,
+            # so add the default reporting flags
+            optionflags |= _unittest_reportflags
+
+        runner = DocTestRunner(optionflags=optionflags,
+                               checker=self._dt_checker, verbose=False)
+
+        try:
+            runner.DIVIDER = "-"*70
+            failures, tries = runner.run(
+                test, out=new.write, clear_globs=False)
+        finally:
+            sys.stdout = old
+
+        if failures:
+            raise self.failureException(self.format_failure(new.getvalue()))
+
+    def format_failure(self, err):
+        # Build a unittest-style failure message pointing at the doctest's
+        # file/line, followed by the runner's captured output.
+        test = self._dt_test
+        if test.lineno is None:
+            lineno = 'unknown line number'
+        else:
+            lineno = '%s' % test.lineno
+        lname = '.'.join(test.name.split('.')[-1:])
+        return ('Failed doctest test for %s\n'
+                '  File "%s", line %s, in %s\n\n%s'
+                % (test.name, test.filename, lineno, lname, err)
+                )
+
+    def debug(self):
+        # Re-run the doctest under DebugRunner, which raises on the first
+        # failure so it can be post-mortem debugged.
+        self.setUp()
+        runner = DebugRunner(optionflags=self._dt_optionflags,
+                             checker=self._dt_checker, verbose=False)
+        runner.run(self._dt_test)
+        self.tearDown()
+
+    def id(self):
+        return self._dt_test.name
+
+    def __repr__(self):
+        name = self._dt_test.name.split('.')
+        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+    __str__ = __repr__
+
+    def shortDescription(self):
+        return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+                 **options):
+    """
+    Convert doctest tests for a module to a unittest test suite.
+
+    This converts each documentation string in a module that
+    contains doctest tests to a unittest test case.  If any of the
+    tests in a doc string fail, then the test case fails.  An exception
+    is raised showing the name of the file containing the test and a
+    (sometimes approximate) line number.
+
+    The `module` argument provides the module to be tested.  The argument
+    can be either a module or a module name.
+
+    If no argument is given, the calling module is used.
+
+    A number of options may be provided as keyword arguments:
+
+    setUp
+      A set-up function.  This is called before running the
+      tests in each file. The setUp function will be passed a DocTest
+      object.  The setUp function can access the test globals as the
+      globs attribute of the test passed.
+
+    tearDown
+      A tear-down function.  This is called after running the
+      tests in each file.  The tearDown function will be passed a DocTest
+      object.  The tearDown function can access the test globals as the
+      globs attribute of the test passed.
+
+    globs
+      A dictionary containing initial global variables for the tests.
+
+    optionflags
+       A set of doctest option flags expressed as an integer.
+
+    Raises ValueError if the module contains no doctests at all.
+    """
+
+    if test_finder is None:
+        test_finder = DocTestFinder()
+
+    module = _normalize_module(module)
+    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+    if globs is None:
+        globs = module.__dict__
+    if not tests:
+        # Why do we want to do this? Because it reveals a bug that might
+        # otherwise be hidden.
+        raise ValueError(module, "has no tests")
+
+    tests.sort()
+    suite = unittest.TestSuite()
+    for test in tests:
+        # Skip docstrings that contain no runnable examples.
+        if len(test.examples) == 0:
+            continue
+        if not test.filename:
+            filename = module.__file__
+            # Map compiled-module filenames back to the .py source:
+            # .pyc/.pyo -> .py, and Jython's Foo$py.class -> Foo.py.
+            if filename[-4:] in (".pyc", ".pyo"):
+                filename = filename[:-1]
+            elif sys.platform.startswith('java') and \
+                    filename.endswith('$py.class'):
+                filename = '%s.py' % filename[:-9]
+            test.filename = filename
+        suite.addTest(DocTestCase(test, **options))
+
+    return suite
+
+class DocFileCase(DocTestCase):
+    """DocTestCase variant for doctests loaded from standalone files.
+
+    Differs from DocTestCase only in how the test is identified and how
+    failures are formatted (file-based instead of module.name-based).
+    """
+
+    def id(self):
+        # Dots in the name would look like package paths; flatten them.
+        return '_'.join(self._dt_test.name.split('.'))
+
+    def __repr__(self):
+        return self._dt_test.filename
+    __str__ = __repr__
+
+    def format_failure(self, err):
+        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
+                % (self._dt_test.name, self._dt_test.filename, err)
+                )
+
+def DocFileTest(path, module_relative=True, package=None,
+                globs=None, parser=DocTestParser(), **options):
+    """Create a DocFileCase for the doctest file at `path`.
+
+    `module_relative`/`package` control path interpretation as in
+    testfile().  Extra keyword `options` are forwarded to DocFileCase
+    (e.g. setUp, tearDown, optionflags).  Raises ValueError when
+    `package` is given with module_relative=False.
+    """
+    if globs is None:
+        globs = {}
+
+    if package and not module_relative:
+        raise ValueError("Package may only be specified for module-"
+                         "relative paths.")
+
+    # Relativize the path.
+    if module_relative:
+        package = _normalize_module(package)
+        path = _module_relative_path(package, path)
+
+    # Find the file and read it.
+    # NOTE(review): the file object is not explicitly closed here.
+    name = os.path.basename(path)
+    doc = open(path).read()
+
+    # Convert it to a test, and wrap it in a DocFileCase.
+    test = parser.get_doctest(doc, globs, name, path, 0)
+    return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+    """A unittest suite for one or more doctest files.
+
+    The path to each doctest file is given as a string; the
+    interpretation of that string depends on the keyword argument
+    "module_relative".
+
+    A number of options may be provided as keyword arguments:
+
+    module_relative
+      If "module_relative" is True, then the given file paths are
+      interpreted as os-independent module-relative paths.  By
+      default, these paths are relative to the calling module's
+      directory; but if the "package" argument is specified, then
+      they are relative to that package.  To ensure os-independence,
+      "filename" should use "/" characters to separate path
+      segments, and may not be an absolute path (i.e., it may not
+      begin with "/").
+
+      If "module_relative" is False, then the given file paths are
+      interpreted as os-specific paths.  These paths may be absolute
+      or relative (to the current working directory).
+
+    package
+      A Python package or the name of a Python package whose directory
+      should be used as the base directory for module relative paths.
+      If "package" is not specified, then the calling module's
+      directory is used as the base directory for module relative
+      filenames.  It is an error to specify "package" if
+      "module_relative" is False.
+
+    setUp
+      A set-up function.  This is called before running the
+      tests in each file. The setUp function will be passed a DocTest
+      object.  The setUp function can access the test globals as the
+      globs attribute of the test passed.
+
+    tearDown
+      A tear-down function.  This is called after running the
+      tests in each file.  The tearDown function will be passed a DocTest
+      object.  The tearDown function can access the test globals as the
+      globs attribute of the test passed.
+
+    globs
+      A dictionary containing initial global variables for the tests.
+
+    optionflags
+      A set of doctest option flags expressed as an integer.
+
+    parser
+      A DocTestParser (or subclass) that should be used to extract
+      tests from the files.
+    """
+    suite = unittest.TestSuite()
+
+    # We do this here so that _normalize_module is called at the right
+    # level.  If it were called in DocFileTest, then this function
+    # would be the caller and we might guess the package incorrectly.
+    if kw.get('module_relative', True):
+        kw['package'] = _normalize_module(kw.get('package'))
+
+    # One DocFileCase per path, all sharing the same keyword options.
+    for path in paths:
+        suite.addTest(DocFileTest(path, **kw))
+
+    return suite
+
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+    """Convert doctest text `s` into an executable Python script.
+
+    Example source lines are kept as code, each example's expected
+    output becomes '## '-prefixed comments under an '# Expected:'
+    header, and non-example prose is turned into comment lines.
+    Returns the script as a single newline-terminated string.
+    """
+    output = []
+    for piece in DocTestParser().parse(s):
+        if isinstance(piece, Example):
+            # Add the example's source code (strip trailing NL)
+            output.append(piece.source[:-1])
+            # Add the expected output:
+            want = piece.want
+            if want:
+                output.append('# Expected:')
+                output += ['## '+l for l in want.split('\n')[:-1]]
+        else:
+            # Add non-example text.
+            output += [_comment_line(l)
+                       for l in piece.split('\n')[:-1]]
+
+    # Trim junk on both ends.
+    while output and output[-1] == '#':
+        output.pop()
+    while output and output[0] == '#':
+        output.pop(0)
+    # Combine the output, and return it.
+    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+    return '\n'.join(output) + '\n'
+
+def testsource(module, name):
+    """Extract the test sources from a doctest docstring as a script.
+
+    Provide the module (or dotted name of the module) containing the
+    test to be debugged and the name (within the module) of the object
+    with the doc string with tests to be debugged.
+
+    Raises ValueError if no test with that name is found.
+    """
+    module = _normalize_module(module)
+    tests = DocTestFinder().find(module)
+    # Select the single test whose fully-qualified name matches.
+    test = [t for t in tests if t.name == name]
+    if not test:
+        raise ValueError(name, "not found in tests")
+    test = test[0]
+    testsrc = script_from_examples(test.docstring)
+    return testsrc
+
+def debug_src(src, pm=False, globs=None):
+    """Debug a single doctest docstring, given as the argument `src`."""
+    testsrc = script_from_examples(src)
+    debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+    "Debug a test script.  `src` is the script, as a string."
+    import pdb
+
+    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
+    # docs say, a file so created cannot be opened by name a second time
+    # on modern Windows boxes, and execfile() needs to open it.
+    # NOTE(review): tempfile.mktemp() is race-prone/insecure; kept here
+    # for the reason above, acceptable only for interactive debugging.
+    srcfilename = tempfile.mktemp(".py", "doctestdebug")
+    f = open(srcfilename, 'w')
+    f.write(src)
+    f.close()
+
+    try:
+        # Never mutate the caller's globals dict.
+        if globs:
+            globs = globs.copy()
+        else:
+            globs = {}
+
+        if pm:
+            # Post-mortem mode: run to the first exception, then drop
+            # into pdb at the point of failure.
+            try:
+                execfile(srcfilename, globs, globs)
+            except:
+                print sys.exc_info()[1]
+                pdb.post_mortem(sys.exc_info()[2])
+        else:
+            # Note that %r is vital here.  '%s' instead can, e.g., cause
+            # backslashes to get treated as metacharacters on Windows.
+            pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+    finally:
+        # Always remove the temporary script, even on error.
+        os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+    """Debug a single doctest docstring.
+
+    Provide the module (or dotted name of the module) containing the
+    test to be debugged and the name (within the module) of the object
+    with the docstring with tests to be debugged.
+    """
+    module = _normalize_module(module)
+    testsrc = testsource(module, name)
+    # Run the extracted script in the module's own namespace.
+    debug_script(testsrc, pm, module.__dict__)
+
+
+__test__ = {}
diff --git a/nose/failure.py b/nose/failure.py
new file mode 100644 (file)
index 0000000..1dff970
--- /dev/null
@@ -0,0 +1,39 @@
+import logging
+import unittest
+from traceback import format_tb
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Failure']
+
+
+class Failure(unittest.TestCase):
+    """Unloadable or unexecutable test.
+
+    A Failure case is placed in a test suite to indicate the presence of a
+    test that could not be loaded or executed. A common example is a test
+    module that fails to import.
+    
+    """
+    __test__ = False # do not collect
+    def __init__(self, exc_class, exc_val, tb=None, address=None):
+        log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb))
+        # Stash the original exception info so runTest can re-raise it.
+        self.exc_class = exc_class
+        self.exc_val = exc_val
+        self.tb = tb
+        self._address = address
+        unittest.TestCase.__init__(self)
+
+    def __str__(self):
+        return "Failure: %s (%s)" % (
+            getattr(self.exc_class, '__name__', self.exc_class), self.exc_val)
+
+    def address(self):
+        # Address (as given at construction) of the test that failed to load.
+        return self._address
+    
+    def runTest(self):
+        if self.tb is not None:            
+            # Python 2 three-argument raise: re-raise with the original
+            # traceback so the report points at the true failure site.
+            raise self.exc_class, self.exc_val, self.tb
+        else:
+            raise self.exc_class(self.exc_val)
diff --git a/nose/importer.py b/nose/importer.py
new file mode 100644 (file)
index 0000000..c971b79
--- /dev/null
@@ -0,0 +1,154 @@
+"""Implements an importer that looks only in specific path (ignoring
+sys.path), and uses a per-path cache in addition to sys.modules. This is
+necessary because test modules in different directories frequently have the
+same names, which means that the first loaded would mask the rest when using
+the builtin importer.
+"""
+import logging
+import os
+import sys
+from nose.config import Config
+
+from imp import find_module, load_module, acquire_lock, release_lock
+
+log = logging.getLogger(__name__)
+
+class Importer(object):
+    """An importer class that does only path-specific imports. That
+    is, the given module is not searched for on sys.path, but only at
+    the path or in the directory specified.
+    """
+    def __init__(self, config=None):
+        # Fall back to a default nose Config when none is supplied.
+        if config is None:
+            config = Config()
+        self.config = config
+
+    def importFromPath(self, path, fqname):
+        """Import a dotted-name package whose tail is at path. In other words,
+        given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then
+        bar from path/to/foo/bar, returning bar.
+        """
+        # find the base dir of the package
+        path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep)
+        name_parts = fqname.split('.')
+        # A package's __init__ file stands for its directory.
+        if path_parts[-1].startswith('__init__'):
+            path_parts.pop()
+        # Strip one path component per dotted-name component to reach
+        # the directory that contains the top-level package.
+        path_parts = path_parts[:-(len(name_parts))]
+        dir_path = os.sep.join(path_parts)
+        # then import fqname starting from that dir
+        return self.importFromDir(dir_path, fqname)                
+
+    def importFromDir(self, dir, fqname):
+        """Import a module *only* from path, ignoring sys.path and
+        reloading if the version in sys.modules is not the one we want.
+        """
+        dir = os.path.normpath(os.path.abspath(dir))
+        log.debug("Import %s from %s", fqname, dir)
+
+        # FIXME reimplement local per-dir cache?
+        
+        # special case for __main__
+        if fqname == '__main__':
+            return sys.modules[fqname]
+        
+        if self.config.addPaths:
+            add_path(dir, self.config)
+            
+        path = [dir]
+        parts = fqname.split('.')
+        part_fqname = ''
+        mod = parent = fh = None
+
+        # Import each dotted-name component in turn, restricting the
+        # search path to the previously imported parent package.
+        for part in parts:
+            if part_fqname == '':
+                part_fqname = part
+            else:
+                part_fqname = "%s.%s" % (part_fqname, part)
+            try:
+                # imp's find/load pair is not thread-safe; hold the
+                # global import lock around it.
+                acquire_lock()
+                log.debug("find module part %s (%s) in %s",
+                          part, part_fqname, path)
+                fh, filename, desc = find_module(part, path)
+                old = sys.modules.get(part_fqname)
+                if old is not None:
+                    # test modules frequently have name overlap; make sure
+                    # we get a fresh copy of anything we are trying to load
+                    # from a new path
+                    log.debug("sys.modules has %s as %s", part_fqname, old)
+                    if (self.sameModule(old, filename)
+                        or (self.config.firstPackageWins and
+                            getattr(old, '__path__', None))):
+                        mod = old
+                    else:
+                        del sys.modules[part_fqname]
+                        mod = load_module(part_fqname, fh, filename, desc)
+                else:
+                    mod = load_module(part_fqname, fh, filename, desc)
+            finally:
+                if fh:
+                    fh.close()
+                release_lock()
+            # Attach the child to its parent package and descend into the
+            # child's __path__ for the next component.
+            if parent:
+                setattr(parent, part, mod)
+            if hasattr(mod, '__path__'):
+                path = mod.__path__
+            parent = mod
+        return mod
+
+    def sameModule(self, mod, filename):
+        # True when the already-loaded `mod` lives in the same directory
+        # as `filename`, i.e. reloading would produce the same module.
+        mod_paths = []
+        if hasattr(mod, '__path__'):
+            for path in mod.__path__:
+                mod_paths.append(os.path.dirname(
+                    os.path.normpath(
+                    os.path.abspath(path))))
+        elif hasattr(mod, '__file__'):
+            mod_paths.append(os.path.dirname(
+                os.path.normpath(
+                os.path.abspath(mod.__file__))))
+        else:
+            # builtin or other module-like object that
+            # doesn't have __file__; must be new
+            return False
+        new_path = os.path.dirname(os.path.normpath(filename))
+        for mod_path in mod_paths:
+            log.debug(
+                "module already loaded? mod: %s new: %s",
+                mod_path, new_path)
+            if mod_path == new_path:
+                return True
+        return False
+
+
+def add_path(path, config=None):
+    """Ensure that the path, or the root of the current package (if
+    path is in a package), is in sys.path.
+
+    Returns the list of paths actually inserted (possibly empty).
+    """
+
+    # FIXME add any src-looking dirs seen too... need to get config for that
+    
+    log.debug('Add path %s' % path)    
+    if not path:
+        return []
+    added = []
+    parent = os.path.dirname(path)
+    # If `path` is itself a package (has __init__.py), recurse upward so
+    # the package *root* is what lands on sys.path.
+    if (parent
+        and os.path.exists(os.path.join(path, '__init__.py'))):
+        added.extend(add_path(parent, config))
+    elif not path in sys.path:
+        log.debug("insert %s into sys.path", path)
+        sys.path.insert(0, path)
+        added.append(path)
+    # Also expose any configured source dirs (e.g. lib/, src/) under path.
+    if config and config.srcDirs:
+        for dirname in config.srcDirs:
+            dirpath = os.path.join(path, dirname)
+            if os.path.isdir(dirpath):
+                sys.path.insert(0, dirpath)
+                added.append(dirpath)
+    return added
+
+
+def remove_path(path):
+    """Remove `path` from sys.path if present; no-op otherwise."""
+    log.debug('Remove path %s' % path)
+    if path in sys.path:
+        sys.path.remove(path)
diff --git a/nose/inspector.py b/nose/inspector.py
new file mode 100644 (file)
index 0000000..a6c4a3e
--- /dev/null
@@ -0,0 +1,207 @@
+"""Simple traceback introspection. Used to add additional information to
+AssertionErrors in tests, so that failure messages may be more informative.
+"""
+import inspect
+import logging
+import re
+import sys
+import textwrap
+import tokenize
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+def inspect_traceback(tb):
+    """Inspect a traceback and its frame, returning source for the expression
+    where the exception was raised, with simple variable replacement performed
+    and the line on which the exception was raised marked with '>>'
+    """
+    log.debug('inspect traceback %s', tb)
+
+    # we only want the innermost frame, where the exception was raised
+    while tb.tb_next:
+        tb = tb.tb_next
+        
+    frame = tb.tb_frame
+    lines, exc_line = tbsource(tb)
+        
+    # figure out the set of lines to grab.
+    inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
+    src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+    exp = Expander(frame.f_locals, frame.f_globals)
+
+    # Tokenize the snippet, feeding each token to the Expander.  On a
+    # TokenError the snippet is syntactically incomplete, so drop its
+    # first line and retry with a fresh Expander.
+    while inspect_lines:
+        try:
+            for tok in tokenize.generate_tokens(src.readline):
+                exp(*tok)
+        except tokenize.TokenError, e:
+            # this can happen if our inspectable region happens to butt up
+            # against the end of a construct like a docstring with the closing
+            # """ on separate line
+            log.debug("Tokenizer error: %s", e)
+            inspect_lines.pop(0)
+            mark_line -= 1
+            src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+            exp = Expander(frame.f_locals, frame.f_globals)
+            continue
+        break
+    # Indent every expanded line; prefix the raising line with '>>'.
+    padded = []
+    if exp.expanded_source:
+        exp_lines = exp.expanded_source.split('\n')
+        ep = 0
+        for line in exp_lines:
+            if ep == mark_line:
+                padded.append('>>  ' + line)
+            else:
+                padded.append('    ' + line)
+            ep += 1
+    return '\n'.join(padded)
+
+
+def tbsource(tb, context=6):
+    """Get source from  a traceback object.
+
+    A tuple of two things is returned: a list of lines of context from
+    the source code, and the index of the current line within that list.
+    The optional second argument specifies the number of lines of context
+    to return, which are centered around the current line.
+
+    .. Note ::
+       This is adapted from inspect.py in the python 2.4 standard library, 
+       since a bug in the 2.3 version of inspect prevents it from correctly
+       locating source lines in a traceback frame.
+    """
+    
+    lineno = tb.tb_lineno
+    frame = tb.tb_frame
+
+    if context > 0:
+        # Center the context window on the raising line (1-based lineno).
+        start = lineno - 1 - context//2
+        log.debug("lineno: %s start: %s", lineno, start)
+        
+        try:
+            lines, dummy = inspect.findsource(frame)
+        except IOError:
+            # Source unavailable (e.g. interactive or compiled-only).
+            lines, index = [''], 0
+        else:
+            all_lines = lines
+            # Clamp the window to the bounds of the file.
+            start = max(start, 1)
+            start = max(0, min(start, len(lines) - context))
+            lines = lines[start:start+context]
+            index = lineno - 1 - start
+            
+            # python 2.5 compat: if previous line ends in a continuation,
+            # decrement start by 1 to match 2.4 behavior                
+            if sys.version_info >= (2, 5) and index > 0:
+                while lines[index-1].strip().endswith('\\'):
+                    start -= 1
+                    lines = all_lines[start:start+context]
+    else:
+        lines, index = [''], 0
+    log.debug("tbsource lines '''%s''' around index %s", lines, index)
+    return (lines, index)    
+
+    
+def find_inspectable_lines(lines, pos):
+    """Find lines in home that are inspectable.
+    
+    Walk back from the err line up to 3 lines, but don't walk back over
+    changes in indent level.
+
+    Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk
+    over changes in indent level (unless part of an extended line)
+
+    Returns (selected_lines, index_of_err_line_within_them).
+    """
+    # cnt: line ends with a backslash continuation; df: line opens a new
+    # block (ends with ':'); ind: captures leading whitespace.
+    cnt = re.compile(r'\\[\s\n]*$')
+    df = re.compile(r':[\s\n]*$')
+    ind = re.compile(r'^(\s*)')
+    toinspect = []
+    home = lines[pos]
+    home_indent = ind.match(home).groups()[0]
+    
+    before = lines[max(pos-3, 0):pos]
+    before.reverse()
+    after = lines[pos+1:min(pos+4, len(lines))]
+
+    # Walk backwards, stopping at the first indentation change.
+    for line in before:
+        if ind.match(line).groups()[0] == home_indent:
+            toinspect.append(line)
+        else:
+            break
+    toinspect.reverse()
+    toinspect.append(home)
+    home_pos = len(toinspect)-1
+    # Walk forwards; continuation lines are accepted regardless of
+    # indent, but never cross into a new block (trailing ':').
+    continued = cnt.search(home)
+    for line in after:
+        if ((continued or ind.match(line).groups()[0] == home_indent)
+            and not df.search(line)):
+            toinspect.append(line)
+            continued = cnt.search(line)
+        else:
+            break
+    log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
+    return toinspect, home_pos
+
+
+class Expander:
+    """Simple expression expander. Uses tokenize to find the names and
+    expands any that can be looked up in the frame.
+
+    Instances are used as the callback for tokenize.generate_tokens;
+    the rebuilt text accumulates in `expanded_source`.
+    """
+    def __init__(self, locals, globals):
+        self.locals = locals
+        self.globals = globals
+        # Column where the previous token ended; None before any token.
+        self.lpos = None
+        self.expanded_source = ''
+         
+    def __call__(self, ttype, tok, start, end, line):
+        # TODO
+        # deal with unicode properly
+        
+        # TODO
+        # Dealing with instance members
+        #   always keep the last thing seen  
+        #   if the current token is a dot,
+        #      get ready to getattr(lastthing, this thing) on the
+        #      next call.
+        
+        # Re-create the original spacing between tokens.
+        if self.lpos is not None:
+            if start[1] >= self.lpos:
+                self.expanded_source += ' ' * (start[1]-self.lpos)
+            elif start[1] < self.lpos:
+                # newline, indent correctly
+                self.expanded_source += ' ' * start[1]
+        self.lpos = end[1]
+      
+        if ttype == tokenize.INDENT:
+            pass
+        elif ttype == tokenize.NAME:
+            # Replace a name with the repr of its value: locals first,
+            # then globals; callables and unknown names stay as-is.
+            # Clean this junk up
+            try:
+                val = self.locals[tok]
+                if callable(val):
+                    val = tok
+                else:
+                    val = repr(val)
+            except KeyError:
+                try:
+                    val = self.globals[tok]
+                    if callable(val):
+                        val = tok
+                    else:
+                        val = repr(val)
+
+                except KeyError:
+                    val = tok
+            # FIXME... not sure how to handle things like funcs, classes
+            # FIXME this is broken for some unicode strings
+            self.expanded_source += val
+        else:
+            self.expanded_source += tok
+        # if this is the end of the line and the line ends with
+        # \, then tack a \ and newline onto the output
+        # print line[end[1]:]
+        if re.match(r'\s+\\\n', line[end[1]:]):
+            self.expanded_source += ' \\\n'
diff --git a/nose/loader.py b/nose/loader.py
new file mode 100644 (file)
index 0000000..1103099
--- /dev/null
@@ -0,0 +1,595 @@
+"""
+Test Loader
+-----------
+
+nose's test loader implements the same basic functionality as its
+superclass, unittest.TestLoader, but extends it by more liberal
+interpretations of what may be a test and how a test may be named.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+import types
+from inspect import isfunction
+from nose.pyversion import unbound_method, ismethod
+from nose.case import FunctionTestCase, MethodTestCase
+from nose.failure import Failure
+from nose.config import Config
+from nose.importer import Importer, add_path, remove_path
+from nose.selector import defaultSelector, TestAddress
+from nose.util import func_lineno, getpackage, isclass, isgenerator, \
+    ispackage, regex_last_key, resolve_name, transplant_func, \
+    transplant_class, test_address
+from nose.suite import ContextSuiteFactory, ContextList, LazySuite
+from nose.pyversion import sort_list, cmp_to_key
+
+
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)

# for efficiency and easier mocking
# (tests can patch these module attributes instead of os.path itself)
op_normpath = os.path.normpath
op_abspath = os.path.abspath
op_join = os.path.join
op_isdir = os.path.isdir
op_isfile = os.path.isfile


# Public API of this module.
__all__ = ['TestLoader', 'defaultTestLoader']
+
+
+class TestLoader(unittest.TestLoader):
+    """Test loader that extends unittest.TestLoader to:
+
+    * Load tests from test-like functions and classes that are not
+      unittest.TestCase subclasses
+    * Find and load test modules in a directory
+    * Support tests that are generators
+    * Support easy extensions of or changes to that behavior through plugins
+    """
+    config = None
+    importer = None
+    workingDir = None
+    selector = None
+    suiteClass = None
+    
+    def __init__(self, config=None, importer=None, workingDir=None,
+                 selector=None):
+        """Initialize a test loader.
+
+        Parameters (all optional):
+
+        * config: provide a `nose.config.Config`_ or other config class
+          instance; if not provided a `nose.config.Config`_ with
+          default values is used.          
+        * importer: provide an importer instance that implements
+          `importFromPath`. If not provided, a
+          `nose.importer.Importer`_ is used.
+        * workingDir: the directory to which file and module names are
+          relative. If not provided, assumed to be the current working
+          directory.
+        * selector: a selector class or instance. If a class is
+          provided, it will be instantiated with one argument, the
+          current config. If not provided, a `nose.selector.Selector`_
+          is used.
+        """
+        if config is None:
+            config = Config()
+        if importer is None:
+            importer = Importer(config=config)
+        if workingDir is None:
+            workingDir = config.workingDir
+        if selector is None:
+            selector = defaultSelector(config)
+        elif isclass(selector):
+            selector = selector(config)
+        self.config = config
+        self.importer = importer
+        self.workingDir = op_normpath(op_abspath(workingDir))
+        self.selector = selector
+        if config.addPaths:
+            add_path(workingDir, config)        
+        self.suiteClass = ContextSuiteFactory(config=config)
+        unittest.TestLoader.__init__(self)     
+
+    def getTestCaseNames(self, testCaseClass):
+        """Override to select with selector, unless
+        config.getTestCaseNamesCompat is True
+        """
+        if self.config.getTestCaseNamesCompat:
+            return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
+        
+        def wanted(attr, cls=testCaseClass, sel=self.selector):
+            item = getattr(cls, attr, None)
+            if isfunction(item):
+                item = unbound_method(cls, item)
+            elif not ismethod(item):
+                return False
+            return sel.wantMethod(item)
+        cases = filter(wanted, dir(testCaseClass))
+        for base in testCaseClass.__bases__:
+            for case in self.getTestCaseNames(base):
+                if case not in cases:
+                    cases.append(case)
+        # add runTest if nothing else picked
+        if not cases and hasattr(testCaseClass, 'runTest'):
+            cases = ['runTest']
+        if self.sortTestMethodsUsing:
+            sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
+        return cases
+
+    def loadTestsFromDir(self, path):
+        """Load tests from the directory at path. This is a generator
+        -- each suite of tests from a module or other file is yielded
+        and is expected to be executed before the next file is
+        examined.
+        """        
+        log.debug("load from dir %s", path)
+        plugins = self.config.plugins
+        plugins.beforeDirectory(path)
+        if self.config.addPaths:
+            paths_added = add_path(path, self.config)
+
+        entries = os.listdir(path)
+        sort_list(entries, regex_last_key(self.config.testMatch))
+        for entry in entries:
+            # this hard-coded initial-dot test will be removed:
+            # http://code.google.com/p/python-nose/issues/detail?id=82
+            if entry.startswith('.'):
+                continue
+            entry_path = op_abspath(op_join(path, entry))
+            is_file = op_isfile(entry_path)
+            wanted = False
+            if is_file:
+                is_dir = False
+                wanted = self.selector.wantFile(entry_path)
+            else:
+                is_dir = op_isdir(entry_path)
+                if is_dir:
+                    # this hard-coded initial-underscore test will be removed:
+                    # http://code.google.com/p/python-nose/issues/detail?id=82
+                    if entry.startswith('_'):
+                        continue
+                    wanted = self.selector.wantDirectory(entry_path)
+            is_package = ispackage(entry_path)
+            if wanted:
+                if is_file:
+                    plugins.beforeContext()
+                    if entry.endswith('.py'):
+                        yield self.loadTestsFromName(
+                            entry_path, discovered=True)
+                    else:
+                        yield self.loadTestsFromFile(entry_path)
+                    plugins.afterContext()
+                elif is_package:
+                    # Load the entry as a package: given the full path,
+                    # loadTestsFromName() will figure it out
+                    yield self.loadTestsFromName(
+                        entry_path, discovered=True)
+                else:
+                    # Another test dir in this one: recurse lazily
+                    yield self.suiteClass(
+                        lambda: self.loadTestsFromDir(entry_path))
+        tests = []
+        for test in plugins.loadTestsFromDir(path):
+            tests.append(test)
+        # TODO: is this try/except needed?
+        try:
+            if tests:
+                yield self.suiteClass(tests)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            yield self.suiteClass([Failure(*sys.exc_info())])
+        
+        # pop paths
+        if self.config.addPaths:
+            for p in paths_added:
+              remove_path(p)
+        plugins.afterDirectory(path)
+
+    def loadTestsFromFile(self, filename):
+        """Load tests from a non-module file. Default is to raise a
+        ValueError; plugins may implement `loadTestsFromFile` to
+        provide a list of tests loaded from the file.
+        """
+        log.debug("Load from non-module file %s", filename)
+        try:
+            tests = [test for test in
+                     self.config.plugins.loadTestsFromFile(filename)]
+            if tests:
+                # Plugins can yield False to indicate that they were
+                # unable to load tests from a file, but it was not an
+                # error -- the file just had no tests to load.
+                tests = filter(None, tests)
+                return self.suiteClass(tests)
+            else:
+                # Nothing was able to even try to load from this file
+                open(filename, 'r').close() # trigger os error
+                raise ValueError("Unable to load tests from file %s"
+                                 % filename)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            return self.suiteClass(
+                [Failure(exc[0], exc[1], exc[2],
+                         address=(filename, None, None))])
+
+    def loadTestsFromGenerator(self, generator, module):
+        """Lazy-load tests from a generator function. The generator function
+        may yield either:
+
+        * a callable, or
+        * a function name resolvable within the same module
+        """
+        def generate(g=generator, m=module):
+            try:
+                for test in g():
+                    test_func, arg = self.parseGeneratedTest(test)
+                    if not callable(test_func):
+                        test_func = getattr(m, test_func)
+                    yield FunctionTestCase(test_func, arg=arg, descriptor=g)
+            except KeyboardInterrupt:
+                raise
+            except:
+                exc = sys.exc_info()
+                yield Failure(exc[0], exc[1], exc[2],
+                              address=test_address(generator))
+        return self.suiteClass(generate, context=generator, can_split=False)
+
+    def loadTestsFromGeneratorMethod(self, generator, cls):
+        """Lazy-load tests from a generator method.
+
+        This is more complicated than loading from a generator function,
+        since a generator method may yield:
+
+        * a function
+        * a bound or unbound method, or
+        * a method name
+        """
+        # convert the unbound generator method
+        # into a bound method so it can be called below
+        if hasattr(generator, 'im_class'):
+            cls = generator.im_class
+        inst = cls()
+        method = generator.__name__
+        generator = getattr(inst, method)
+
+        def generate(g=generator, c=cls):
+            try:
+                for test in g():
+                    test_func, arg = self.parseGeneratedTest(test)
+                    if not callable(test_func):
+                        test_func = unbound_method(c, getattr(c, test_func))
+                    if ismethod(test_func):
+                        yield MethodTestCase(test_func, arg=arg, descriptor=g)
+                    elif isfunction(test_func):
+                        # In this case we're forcing the 'MethodTestCase'
+                        # to run the inline function as its test call,
+                        # but using the generator method as the 'method of
+                        # record' (so no need to pass it as the descriptor)
+                        yield MethodTestCase(g, test=test_func, arg=arg)
+                    else:
+                        yield Failure(
+                            TypeError,
+                            "%s is not a function or method" % test_func)
+            except KeyboardInterrupt:
+                raise
+            except:
+                exc = sys.exc_info()
+                yield Failure(exc[0], exc[1], exc[2],
+                              address=test_address(generator))
+        return self.suiteClass(generate, context=generator, can_split=False)
+
+    def loadTestsFromModule(self, module, path=None, discovered=False):
+        """Load all tests from module and return a suite containing
+        them. If the module has been discovered and is not test-like,
+        the suite will be empty by default, though plugins may add
+        their own tests.
+        """
+        log.debug("Load from module %s", module)
+        tests = []
+        test_classes = []
+        test_funcs = []
+        # For *discovered* modules, we only load tests when the module looks
+        # testlike. For modules we've been directed to load, we always
+        # look for tests. (discovered is set to True by loadTestsFromDir)
+        if not discovered or self.selector.wantModule(module):
+            for item in dir(module):
+                test = getattr(module, item, None)
+                # print "Check %s (%s) in %s" % (item, test, module.__name__)
+                if isclass(test):
+                    if self.selector.wantClass(test):
+                        test_classes.append(test)
+                elif isfunction(test) and self.selector.wantFunction(test):
+                    test_funcs.append(test)
+            sort_list(test_classes, lambda x: x.__name__)
+            sort_list(test_funcs, func_lineno)
+            tests = map(lambda t: self.makeTest(t, parent=module),
+                        test_classes + test_funcs)
+
+        # Now, descend into packages
+        # FIXME can or should this be lazy?
+        # is this syntax 2.2 compatible?
+        module_paths = getattr(module, '__path__', [])
+        if path:
+            path = os.path.realpath(path)
+        for module_path in module_paths:
+            log.debug("Load tests from module path %s?", module_path)
+            log.debug("path: %s os.path.realpath(%s): %s",
+                      path, module_path, os.path.realpath(module_path))
+            if (self.config.traverseNamespace or not path) or \
+                    os.path.realpath(module_path).startswith(path):
+                tests.extend(self.loadTestsFromDir(module_path))
+            
+        for test in self.config.plugins.loadTestsFromModule(module, path):
+            tests.append(test)
+
+        return self.suiteClass(ContextList(tests, context=module))
+    
    def loadTestsFromName(self, name, module=None, discovered=False):
        """Load tests from the entity with the given name.

        The name may indicate a file, directory, module, or any object
        within a module. See `nose.util.split_test_name` for details on
        test name parsing.

        Branch summary: plugins get first crack; with an explicit
        module, the name is resolved as an attribute path on it;
        otherwise the parsed address's module or filename part decides
        how to load. All load errors become Failure tests.
        """
        # FIXME refactor this method into little bites?
        log.debug("load from %s (%s)", name, module)

        suite = self.suiteClass

        # give plugins first crack
        plug_tests = self.config.plugins.loadTestsFromName(name, module)
        if plug_tests:
            return suite(plug_tests)

        addr = TestAddress(name, workingDir=self.workingDir)
        if module:
            # Two cases:
            #  name is class.foo
            #    The addr will be incorrect, since it thinks class.foo is
            #    a dotted module name. It's actually a dotted attribute
            #    name. In this case we want to use the full submitted
            #    name as the name to load from the module.
            #  name is module:class.foo
            #    The addr will be correct. The part we want is the part after
            #    the :, which is in addr.call.
            if addr.call:
                name = addr.call
            parent, obj = self.resolve(name, module)
            # If the containing class was imported from another module,
            # transplant it so reporting is attributed to the module
            # the caller actually named.
            if (isclass(parent)
                and getattr(parent, '__module__', None) != module.__name__):
                parent = transplant_class(parent, module.__name__)
                obj = getattr(parent, obj.__name__)
            log.debug("parent %s obj %s module %s", parent, obj, module)
            if isinstance(obj, Failure):
                return suite([obj])
            else:
                return suite(ContextList([self.makeTest(obj, parent)],
                                         context=parent))
        else:
            if addr.module:
                try:
                    if addr.filename is None:
                        # No file known: resolve as a dotted name on
                        # the import path.
                        module = resolve_name(addr.module)
                    else:
                        self.config.plugins.beforeImport(
                            addr.filename, addr.module)
                        # FIXME: to support module.name names,
                        # do what resolve-name does and keep trying to
                        # import, popping tail of module into addr.call,
                        # until we either get an import or run out of
                        # module parts
                        try:
                            module = self.importer.importFromPath(
                                addr.filename, addr.module)
                        finally:
                            # afterImport fires even when the import
                            # itself raised.
                            self.config.plugins.afterImport(
                                addr.filename, addr.module)
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    # Import errors become a Failure test so the run
                    # reports them instead of aborting collection.
                    exc = sys.exc_info()
                    return suite([Failure(exc[0], exc[1], exc[2],
                                          address=addr.totuple())])
                if addr.call:
                    # module:callable -- recurse with the module bound.
                    return self.loadTestsFromName(addr.call, module)
                else:
                    return self.loadTestsFromModule(
                        module, addr.filename,
                        discovered=discovered)
            elif addr.filename:
                path = addr.filename
                if addr.call:
                    package = getpackage(path)
                    if package is None:
                        return suite([
                            Failure(ValueError,
                                    "Can't find callable %s in file %s: "
                                    "file is not a python module" %
                                    (addr.call, path),
                                    address=addr.totuple())])
                    return self.loadTestsFromName(addr.call, module=package)
                else:
                    if op_isdir(path):
                        # In this case we *can* be lazy since we know
                        # that each module in the dir will be fully
                        # loaded before its tests are executed; we
                        # also know that we're not going to be asked
                        # to load from . and ./some_module.py *as part
                        # of this named test load*
                        return LazySuite(
                            lambda: self.loadTestsFromDir(path))
                    elif op_isfile(path):
                        return self.loadTestsFromFile(path)
                    else:
                        return suite([
                                Failure(OSError, "No such file %s" % path,
                                        address=addr.totuple())])
            else:
                # just a function? what to do? I think it can only be
                # handled when module is not None
                return suite([
                    Failure(ValueError, "Unresolvable test name %s" % name,
                            address=addr.totuple())])
+
+    def loadTestsFromNames(self, names, module=None):
+        """Load tests from all names, returning a suite containing all
+        tests.
+        """
+        plug_res = self.config.plugins.loadTestsFromNames(names, module)
+        if plug_res:
+            suite, names = plug_res
+            if suite:
+                return self.suiteClass([
+                    self.suiteClass(suite),
+                    unittest.TestLoader.loadTestsFromNames(self, names, module)
+                    ])
+        return unittest.TestLoader.loadTestsFromNames(self, names, module)
+
+    def loadTestsFromTestCase(self, testCaseClass):
+        """Load tests from a unittest.TestCase subclass.
+        """
+        cases = []
+        plugins = self.config.plugins
+        for case in plugins.loadTestsFromTestCase(testCaseClass):
+            cases.append(case)
+        # For efficiency in the most common case, just call and return from
+        # super. This avoids having to extract cases and rebuild a context
+        # suite when there are no plugin-contributed cases.
+        if not cases:
+            return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
+        cases.extend(
+            [case for case in
+             super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
+        return self.suiteClass(cases)
+    
+    def loadTestsFromTestClass(self, cls):
+        """Load tests from a test class that is *not* a unittest.TestCase
+        subclass.
+
+        In this case, we can't depend on the class's `__init__` taking method
+        name arguments, so we have to compose a MethodTestCase for each
+        method in the class that looks testlike.
+        """
+        def wanted(attr, cls=cls, sel=self.selector):
+            item = getattr(cls, attr, None)
+            if isfunction(item):
+                item = unbound_method(cls, item)
+            elif not ismethod(item):
+                return False
+            return sel.wantMethod(item)
+        cases = [self.makeTest(getattr(cls, case), cls)
+                 for case in filter(wanted, dir(cls))]
+        for test in self.config.plugins.loadTestsFromTestClass(cls):
+            cases.append(test)
+        return self.suiteClass(ContextList(cases, context=cls))
+
+    def makeTest(self, obj, parent=None):
+        try:
+            return self._makeTest(obj, parent)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            try:
+                addr = test_address(obj)
+            except KeyboardInterrupt:
+                raise
+            except:
+                addr = None
+            return Failure(exc[0], exc[1], exc[2], address=addr)
+    
    def _makeTest(self, obj, parent=None):
        """Given a test object and its parent, return a test case
        or test suite.

        Dispatches on the type of obj: TestCase instance, class,
        method, function (including generators), with plugin-built
        tests taking precedence.
        """
        plug_tests = []
        # Best-effort address; used only for error reporting below.
        try:
            addr = test_address(obj)
        except KeyboardInterrupt:
            raise
        except:
            addr = None
        # Plugins get first crack at constructing the test(s).
        for test in self.config.plugins.makeTest(obj, parent):
            plug_tests.append(test)
        # TODO: is this try/except needed?
        try:
            if plug_tests:
                return self.suiteClass(plug_tests)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            exc = sys.exc_info()
            return Failure(exc[0], exc[1], exc[2], address=addr)

        if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
            # This is a Python 3.x 'unbound method'.  Wrap it with its
            # associated class.
            obj = unbound_method(parent, obj)

        if isinstance(obj, unittest.TestCase):
            # Already a test; use it directly.
            return obj
        elif isclass(obj):
            # Transplant classes imported from another module so they
            # report against the requesting parent module.
            if parent and obj.__module__ != parent.__name__:
                obj = transplant_class(obj, parent.__name__)
            if issubclass(obj, unittest.TestCase):
                return self.loadTestsFromTestCase(obj)
            else:
                return self.loadTestsFromTestClass(obj)
        elif ismethod(obj):
            if parent is None:
                parent = obj.__class__
            if issubclass(parent, unittest.TestCase):
                return parent(obj.__name__)
            else:
                if isgenerator(obj):
                    return self.loadTestsFromGeneratorMethod(obj, parent)
                else:
                    return MethodTestCase(obj)
        elif isfunction(obj):
            if parent and obj.__module__ != parent.__name__:
                obj = transplant_func(obj, parent.__name__)
            if isgenerator(obj):
                return self.loadTestsFromGenerator(obj, parent)
            else:
                return FunctionTestCase(obj)
        else:
            # Nothing we know how to turn into a test.
            return Failure(TypeError,
                           "Can't make a test from %s" % obj,
                           address=addr)
+
+    def resolve(self, name, module):
+        """Resolve name within module
+        """
+        obj = module
+        parts = name.split('.')
+        for part in parts:
+            parent, obj = obj, getattr(obj, part, None)
+        if obj is None:
+            # no such test
+            obj = Failure(ValueError, "No such test %s" % name)
+        return parent, obj
+
+    def parseGeneratedTest(self, test):
+        """Given the yield value of a test generator, return a func and args.
+
+        This is used in the two loadTestsFromGenerator* methods.
+
+        """
+        if not isinstance(test, tuple):         # yield test
+            test_func, arg = (test, tuple())
+        elif len(test) == 1:                    # yield (test,)
+            test_func, arg = (test[0], tuple())
+        else:                                   # yield test, foo, bar, ...
+            assert len(test) > 1 # sanity check
+            test_func, arg = (test[0], test[1:])
+        return test_func, arg
+
# unittest-style module attribute; NOTE this is the class itself,
# not an instance.
defaultTestLoader = TestLoader
+
diff --git a/nose/plugins/__init__.py b/nose/plugins/__init__.py
new file mode 100644 (file)
index 0000000..260b628
--- /dev/null
@@ -0,0 +1,190 @@
+"""
+Writing Plugins
+---------------
+
+nose supports plugins for test collection, selection, observation and
+reporting. There are two basic rules for plugins:
+
+* Plugin classes should subclass :class:`nose.plugins.Plugin`.
+
+* Plugins may implement any of the methods described in the class
+  :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
+  this class is for documentary purposes only; plugins may not subclass
+  IPluginInterface.
+
+Hello World
+===========
+
+Here's a basic plugin.  It doesn't do much so read on for more ideas or dive
+into the :doc:`IPluginInterface <interface>` to see all available hooks.
+
+.. code-block:: python
+
+    import logging
+    import os
+
+    from nose.plugins import Plugin
+
+    log = logging.getLogger('nose.plugins.helloworld')
+
+    class HelloWorld(Plugin):
+        name = 'helloworld'
+
+        def options(self, parser, env=os.environ):
+            super(HelloWorld, self).options(parser, env=env)
+
+        def configure(self, options, conf):
+            super(HelloWorld, self).configure(options, conf)
+            if not self.enabled:
+                return
+
+        def finalize(self, result):
+            log.info('Hello pluginized world!')
+
+Registering
+===========
+
+.. Note::
+  Important note: the following applies only to the default
+  plugin manager. Other plugin managers may use different means to
+  locate and load plugins.
+
+For nose to find a plugin, it must be part of a package that uses
+setuptools_, and the plugin must be included in the entry points defined
+in the setup.py for the package:
+
+.. code-block:: python
+
+    setup(name='Some plugin',
+        # ...
+        entry_points = {
+            'nose.plugins.0.10': [
+                'someplugin = someplugin:SomePlugin'
+                ]
+            },
+        # ...
+        )
+
+Once the package is installed with install or develop, nose will be able
+to load the plugin.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+Registering a plugin without setuptools
+=======================================
+
+It is currently possible to register a plugin programmatically by
creating a custom nose runner like this:
+
+.. code-block:: python
+
+    import nose
+    from yourplugin import YourPlugin
+
+    if __name__ == '__main__':
+        nose.main(addplugins=[YourPlugin()])
+
+Defining options
+================
+
+All plugins must implement the methods ``options(self, parser, env)``
+and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
+that want the standard options should call the superclass methods.
+
+nose uses optparse.OptionParser from the standard library to parse
+arguments. A plugin's ``options()`` method receives a parser
+instance. It's good form for a plugin to use that instance only to add
+additional arguments that take only long arguments (--like-this). Most
+of nose's built-in arguments get their default value from an environment
+variable.
+
+A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
+object, as well as the current config object. Plugins should configure their
+behavior based on the user-selected settings, and may raise exceptions
+if the configured behavior is nonsensical.
+
+Logging
+=======
+
+nose uses the logging classes from the standard library. To enable users
+to view debug messages easily, plugins should use ``logging.getLogger()`` to
+acquire a logger in the ``nose.plugins`` namespace.
+
+Recipes
+=======
+
+* Writing a plugin that monitors or controls test result output
+
+  Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
+  results. If you also want to monitor output, implement
+  ``setOutputStream`` and keep a reference to the output stream. If you
+  want to prevent the builtin ``TextTestResult`` output, implement
  ``setOutputStream`` and *return a dummy stream*. The default output will go
+  to the dummy stream, while you send your desired output to the real stream.
+
+  Example: `examples/html_plugin/htmlplug.py`_
+
+* Writing a plugin that handles exceptions
+
+  Subclass :doc:`ErrorClassPlugin <errorclasses>`.
+
+  Examples: :doc:`nose.plugins.deprecated <deprecated>`,
+  :doc:`nose.plugins.skip <skip>`
+
+* Writing a plugin that adds detail to error reports
+
  Implement ``formatError`` and/or ``formatFailure``. The error tuple
+  you return (error class, error message, traceback) will replace the
+  original error tuple.
+
+  Examples: :doc:`nose.plugins.capture <capture>`,
+  :doc:`nose.plugins.failuredetail <failuredetail>`
+
+* Writing a plugin that loads tests from files other than python modules
+
+  Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
+  return True for files that you want to examine for tests. In
+  ``loadTestsFromFile``, for those files, return an iterable
+  containing TestCases (or yield them as you find them;
+  ``loadTestsFromFile`` may also be a generator).
+
+  Example: :doc:`nose.plugins.doctests <doctests>`
+
+* Writing a plugin that prints a report
+
+  Implement ``begin`` if you need to perform setup before testing
+  begins. Implement ``report`` and output your report to the provided stream.
+
+  Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
+
+* Writing a plugin that selects or rejects tests
+
+  Implement any or all ``want*``  methods. Return False to reject the test
+  candidate, True to accept it -- which  means that the test candidate
+  will pass through the rest of the system, so you must be prepared to
+  load tests from it if tests can't be loaded by the core loader or
+  another plugin -- and None if you don't care.
+
+  Examples: :doc:`nose.plugins.attrib <attrib>`,
+  :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
+
+
+More Examples
+=============
+
+See any builtin plugin or example plugin in the examples_ directory in
+the nose source distribution. There is a list of third-party plugins
+`on jottit`_.
+
+.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
+.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
+.. _on jottit: http://nose-plugins.jottit.com/
+
+"""
+from nose.plugins.base import Plugin
+from nose.plugins.manager import *
+from nose.plugins.plugintest import PluginTester
+
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstring when the
    # module is executed directly.
    import doctest
    doctest.testmod()
diff --git a/nose/plugins/allmodules.py b/nose/plugins/allmodules.py
new file mode 100644 (file)
index 0000000..1ccd777
--- /dev/null
@@ -0,0 +1,45 @@
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
+NOSE_ALL_MODULES environment variable to enable collection and execution of
+tests in all python modules. Normal nose behavior is to look for tests only in
+modules that match testMatch.
+
+More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
+
+.. warning ::
+
+   This plugin can have surprising interactions with plugins that load tests
+   from what nose normally considers non-test modules, such as
+   the :doc:`doctest plugin <doctests>`. This is because any given
+   object in a module can't be loaded both by a plugin and the normal nose
+   :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+   or classes in non-test modules that look like tests but aren't, you will
+   likely see errors as nose attempts to run them as tests.
+
+"""
+
+import os
+from nose.plugins.base import Plugin
+
class AllModules(Plugin):
    """Collect tests from all python modules.
    """

    def options(self, parser, env):
        """Register commandline options.
        """
        env_opt = 'NOSE_ALL_MODULES'
        # Build the help string once so the add_option call stays readable.
        help_text = ("Enable plugin %s: %s [%s]"
                     % (self.__class__.__name__, self.help(), env_opt))
        parser.add_option('--all-modules',
                          action="store_true",
                          dest=self.enableOpt,
                          default=env.get(env_opt),
                          help=help_text)

    def wantFile(self, file):
        """Override to return True for all files ending with .py"""
        # Accept every .py file outright; for anything else return None so
        # other plugins and the default selector still get a say.
        if file.endswith('.py'):
            return True
        return None

    def wantModule(self, module):
        """Override to return True for all modules"""
        return True
diff --git a/nose/plugins/attrib.py b/nose/plugins/attrib.py
new file mode 100644 (file)
index 0000000..3d4422a
--- /dev/null
@@ -0,0 +1,286 @@
+"""Attribute selector plugin.
+
+Oftentimes when testing you will want to select tests based on
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
+Attribute selector plugin by setting attributes on your test methods.
+Here is an example:
+
+.. code-block:: python
+
+    def test_big_download():
+        import urllib
+        # commence slowness...
+
+    test_big_download.slow = 1
+
+Once you've assigned an attribute ``slow = 1`` you can exclude that
+test and all other tests having the slow attribute by running ::
+
+    $ nosetests -a '!slow'
+
+There is also a decorator available for you that will set attributes.
+Here's how to set ``slow=1`` like above with the decorator:
+
+.. code-block:: python
+
+    from nose.plugins.attrib import attr
+    @attr('slow')
+    def test_big_download():
+        import urllib
+        # commence slowness...
+
+And here's how to set an attribute with a specific value:
+
+.. code-block:: python
+
+    from nose.plugins.attrib import attr
+    @attr(speed='slow')
+    def test_big_download():
+        import urllib
+        # commence slowness...
+
+This test could be run with ::
+
+    $ nosetests -a speed=slow
+
+In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
+on all its test methods at once.  For example:
+
+.. code-block:: python
+
+    from nose.plugins.attrib import attr
+    @attr(speed='slow')
+    class MyTestCase:
+        def test_long_integration(self):
+            pass
+        def test_end_to_end_something(self):
+            pass
+
+Below is a reference to the different syntaxes available.
+
+Simple syntax
+-------------
+
+Examples of using the ``-a`` and ``--attr`` options:
+
+* ``nosetests -a status=stable``
+   Only runs tests with attribute "status" having value "stable"
+
+* ``nosetests -a priority=2,status=stable``
+   Runs tests having both attributes and values
+
+* ``nosetests -a priority=2 -a slow``
+   Runs tests that match either attribute
+
+* ``nosetests -a tags=http``
+   If a test's ``tags`` attribute was a list and it contained the value
+   ``http`` then it would be run
+
+* ``nosetests -a slow``
+   Runs tests with the attribute ``slow`` if its value does not equal False
+   (False, [], "", etc...)
+
+* ``nosetests -a '!slow'``
+   Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
+   attribute that is equal to False
+   **NOTE**:
+   if your shell (like bash) interprets '!' as a special character make sure to
+   put single quotes around it.
+
+Expression Evaluation
+---------------------
+
+Examples using the ``-A`` and ``--eval-attr`` options:
+
+* ``nosetests -A "not slow"``
+  Evaluates the Python expression "not slow" and runs the test if True
+
+* ``nosetests -A "(priority > 5) and not slow"``
+  Evaluates a complex Python expression and runs the test if True
+
+"""
+import inspect
+import logging
+import os
+import sys
+from inspect import isfunction
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins.attrib')
+compat_24 = sys.version_info >= (2, 4)
+
def attr(*args, **kwargs):
    """Decorator that adds attributes to classes or functions
    for use with the Attribute (-a) plugin.

    Positional string arguments become flag attributes set to True;
    keyword arguments become attributes with the given values. The
    decorated object itself is returned unchanged otherwise.
    """
    def wrap_ob(ob):
        for name in args:
            setattr(ob, name, True)
        # Use items() rather than the Python 2-only iteritems() so the
        # decorator also works under Python 3; on Python 2 the behavior
        # is identical.
        for name, value in kwargs.items():
            setattr(ob, name, value)
        return ob
    return wrap_ob
+
def get_method_attr(method, cls, attr_name, default=False):
    """Look up an attribute on a method/function.

    Falls back to the method's class (when one is given), and finally
    to *default* when the attribute is found on neither.
    """
    # Distinct sentinel so an attribute legitimately set to None/False is
    # still treated as "found".
    _sentinel = object()
    found = getattr(method, attr_name, _sentinel)
    if found is not _sentinel:
        return found
    if cls is not None:
        found = getattr(cls, attr_name, _sentinel)
        if found is not _sentinel:
            return found
    return default
+
+
class ContextHelper:
    """Dictionary-like eval context.

    Resolves names as attributes of a method/function and, failing
    that, of its class (see get_method_attr).
    """
    def __init__(self, method, cls):
        self.method, self.cls = method, cls

    def __getitem__(self, name):
        # Missing names resolve to False (get_method_attr's default),
        # so eval'd expressions never raise NameError for absent attrs.
        return get_method_attr(self.method, self.cls, name)
+
+
class AttributeSelector(Plugin):
    """Selects test cases to be run based on their attributes.
    """

    def __init__(self):
        Plugin.__init__(self)
        # List of attribute rule groups; see configure() for the structure.
        self.attribs = []

    def options(self, parser, env):
        """Register command line options"""
        parser.add_option("-a", "--attr",
                          dest="attr", action="append",
                          default=env.get('NOSE_ATTR'),
                          metavar="ATTR",
                          help="Run only tests that have attributes "
                          "specified by ATTR [NOSE_ATTR]")
        # disable in < 2.4: eval can't take needed args
        if compat_24:
            parser.add_option("-A", "--eval-attr",
                              dest="eval_attr", metavar="EXPR", action="append",
                              default=env.get('NOSE_EVAL_ATTR'),
                              help="Run only tests for whose attributes "
                              "the Python expression EXPR evaluates "
                              "to True [NOSE_EVAL_ATTR]")

    def configure(self, options, config):
        """Configure the plugin and system, based on selected options.

        attr and eval_attr may each be lists.

        self.attribs will be a list of lists of tuples. In that list, each
        list is a group of attributes, all of which must match for the rule to
        match.
        """
        self.attribs = []

        # handle python eval-expression parameter
        if compat_24 and options.eval_attr:
            eval_attr = tolist(options.eval_attr)
            for attr in eval_attr:
                # "<python expression>"
                # -> eval(expr) in attribute context must be True
                # The tuple stores (expression text, evaluator callable);
                # validateAttrib detects the callable and passes the
                # expression back in as the first argument.
                def eval_in_context(expr, obj, cls):
                    return eval(expr, None, ContextHelper(obj, cls))
                self.attribs.append([(attr, eval_in_context)])

        # attribute requirements are a comma separated list of
        # 'key=value' pairs
        if options.attr:
            std_attr = tolist(options.attr)
            for attr in std_attr:
                # all attributes within an attribute group must match
                attr_group = []
                for attrib in attr.strip().split(","):
                    # don't die on trailing comma
                    if not attrib:
                        continue
                    items = attrib.split("=", 1)
                    if len(items) > 1:
                        # "name=value"
                        # -> 'str(obj.name) == value' must be True
                        key, value = items
                    else:
                        key = items[0]
                        if key[0] == "!":
                            # "!name"
                            # 'bool(obj.name)' must be False
                            key = key[1:]
                            value = False
                        else:
                            # "name"
                            # -> 'bool(obj.name)' must be True
                            value = True
                    attr_group.append((key, value))
                self.attribs.append(attr_group)
        # Any rule at all implies the user asked for attribute selection,
        # so enable the plugin without requiring --with-attributeselector.
        if self.attribs:
            self.enabled = True

    def validateAttrib(self, method, cls = None):
        """Verify whether a method has the required attributes.

        The method is considered a match if it matches all attributes
        for any attribute group (groups are OR'd, attributes within a
        group are AND'd).
        """
        # TODO: is there a need for case-sensitive value comparison?
        # NOTE: 'any' here shadows the builtin any() for this method.
        any = False
        for group in self.attribs:
            match = True
            for key, value in group:
                attr = get_method_attr(method, cls, key)
                if callable(value):
                    # eval-expression rule: key holds the expression text.
                    if not value(key, method, cls):
                        match = False
                        break
                elif value is True:
                    # value must exist and be True
                    if not bool(attr):
                        match = False
                        break
                elif value is False:
                    # value must not exist or be False
                    if bool(attr):
                        match = False
                        break
                elif type(attr) in (list, tuple):
                    # value must be found in the list attribute
                    if not str(value).lower() in [str(x).lower()
                                                  for x in attr]:
                        match = False
                        break
                else:
                    # value must match, convert to string and compare
                    if (value != attr
                        and str(value).lower() != str(attr).lower()):
                        match = False
                        break
            any = any or match
        if any:
            # not True because we don't want to FORCE the selection of the
            # item, only say that it is acceptable
            return None
        return False

    def wantFunction(self, function):
        """Accept the function if its attributes match.
        """
        return self.validateAttrib(function)

    def wantMethod(self, method):
        """Accept the method if its attributes match.
        """
        # im_class is only present on bound/unbound methods (Python 2);
        # anything else cannot carry class-level attributes, so reject it.
        try:
            cls = method.im_class
        except AttributeError:
            return False
        return self.validateAttrib(method, cls)
diff --git a/nose/plugins/base.py b/nose/plugins/base.py
new file mode 100644 (file)
index 0000000..40701d2
--- /dev/null
@@ -0,0 +1,728 @@
+import os
+import textwrap
+from optparse import OptionConflictError
+from warnings import warn
+from nose.util import tolist
+
+class Plugin(object):
+    """Base class for nose plugins. It's recommended but not *necessary* to
+    subclass this class to create a plugin, but all plugins *must* implement
+    `options(self, parser, env)` and `configure(self, options, conf)`, and
+    must have the attributes `enabled`, `name` and `score`.  The `name`
+    attribute may contain hyphens ('-').
+
+    Plugins should not be enabled by default.
+
+    Subclassing Plugin (and calling the superclass methods in
+    __init__, configure, and options, if you override them) will give
+    your plugin some friendly default behavior:
+
+    * A --with-$name option will be added to the command line interface
+      to enable the plugin, and a corresponding environment variable
+      will be used as the default value. The plugin class's docstring
+      will be used as the help for this option.
+    * The plugin will not be enabled unless this option is selected by
+      the user.
+    """
+    can_configure = False
+    enabled = False
+    enableOpt = None
+    name = None
+    score = 100
+
+    def __init__(self):
+        if self.name is None:
+            self.name = self.__class__.__name__.lower()
+        if self.enableOpt is None:
+            self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
+
+    def addOptions(self, parser, env=None):
+        """Add command-line options for this plugin.
+
+        The base plugin class adds --with-$name by default, used to enable the
+        plugin.
+
+        .. warning :: Don't implement addOptions unless you want to override
+                      all default option handling behavior, including
+                      warnings for conflicting options. Implement
+                      :meth:`options
+                      <nose.plugins.base.IPluginInterface.options>`
+                      instead.
+        """
+        self.add_options(parser, env)
+
+    def add_options(self, parser, env=None):
+        """Non-camel-case version of func name for backwards compatibility.
+
+        .. warning ::
+
+           DEPRECATED: Do not use this method,
+           use :meth:`options <nose.plugins.base.IPluginInterface.options>`
+           instead.
+
+        """
+        # FIXME raise deprecation warning if wasn't called by wrapper
+        if env is None:
+            env = os.environ
+        try:
+            self.options(parser, env)
+            self.can_configure = True
+        except OptionConflictError, e:
+            warn("Plugin %s has conflicting option string: %s and will "
+                 "be disabled" % (self, e), RuntimeWarning)
+            self.enabled = False
+            self.can_configure = False
+
+    def options(self, parser, env):
+        """Register commandline options.
+
+        Implement this method for normal options behavior with protection from
+        OptionConflictErrors. If you override this method and want the default
+        --with-$name option to be registered, be sure to call super().
+        """
+        env_opt = 'NOSE_WITH_%s' % self.name.upper()
+        env_opt = env_opt.replace('-', '_')
+        parser.add_option("--with-%s" % self.name,
+                          action="store_true",
+                          dest=self.enableOpt,
+                          default=env.get(env_opt),
+                          help="Enable plugin %s: %s [%s]" %
+                          (self.__class__.__name__, self.help(), env_opt))
+
+    def configure(self, options, conf):
+        """Configure the plugin and system, based on selected options.
+
+        The base plugin class sets the plugin to enabled if the enable option
+        for the plugin (self.enableOpt) is true.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        if hasattr(options, self.enableOpt):
+            self.enabled = getattr(options, self.enableOpt)
+
+    def help(self):
+        """Return help for this plugin. This will be output as the help
+        section of the --with-$name option that enables the plugin.
+        """
+        if self.__class__.__doc__:
+            # doc sections are often indented; compress the spaces
+            return textwrap.dedent(self.__class__.__doc__)
+        return "(no help available)"
+
+    # Compatiblity shim
+    def tolist(self, val):
+        warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
+             DeprecationWarning)
+        return tolist(val)
+
+
+class IPluginInterface(object):
+    """
+    IPluginInterface describes the plugin API. Do not subclass or use this
+    class directly.
+    """
+    def __new__(cls, *arg, **kw):
+        raise TypeError("IPluginInterface class is for documentation only")
+
+    def addOptions(self, parser, env):
+        """Called to allow plugin to register command-line options with the
+        parser. DO NOT return a value from this method unless you want to stop
+        all other plugins from setting their options.
+
+        .. warning ::
+
+           DEPRECATED -- implement
+           :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
+        """
+        pass
+    add_options = addOptions
+    add_options.deprecated = True
+
+    def addDeprecated(self, test):
+        """Called when a deprecated test is seen. DO NOT return a value
+        unless you want to stop other plugins from seeing the deprecated
+        test.
+
+        .. warning :: DEPRECATED -- check error class in addError instead
+        """
+        pass
+    addDeprecated.deprecated = True
+
+    def addError(self, test, err):
+        """Called when a test raises an uncaught exception. DO NOT return a
+        value unless you want to stop other plugins from seeing that the
+        test has raised an error.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`            
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    addError.changed = True
+
+    def addFailure(self, test, err):
+        """Called when a test fails. DO NOT return a value unless you
+        want to stop other plugins from seeing that the test has failed.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: 3-tuple
+        :type err: sys.exc_info() tuple
+        """
+        pass
+    addFailure.changed = True
+
+    def addSkip(self, test):
+        """Called when a test is skipped. DO NOT return a value unless
+        you want to stop other plugins from seeing the skipped test.
+
+        .. warning:: DEPRECATED -- check error class in addError instead
+        """
+        pass
+    addSkip.deprecated = True
+
+    def addSuccess(self, test):
+        """Called when a test passes. DO NOT return a value unless you
+        want to stop other plugins from seeing the passing test.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    addSuccess.changed = True
+
+    def afterContext(self):
+        """Called after a context (generally a module) has been
+        lazy-loaded, imported, setup, had its tests loaded and
+        executed, and torn down.
+        """
+        pass
+    afterContext._new = True
+
+    def afterDirectory(self, path):
+        """Called after all tests have been loaded from directory at path
+        and run.
+
+        :param path: the directory that has finished processing
+        :type path: string
+        """
+        pass
+    afterDirectory._new = True
+
+    def afterImport(self, filename, module):
+        """Called after module is imported from filename. afterImport
+        is called even if the import failed.
+
+        :param filename: The file that was loaded
+        :type filename: string
+        :param filename: The name of the module
+        :type module: string
+        """
+        pass
+    afterImport._new = True
+
+    def afterTest(self, test):
+        """Called after the test has been run and the result recorded
+        (after stopTest).
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    afterTest._new = True
+
+    def beforeContext(self):
+        """Called before a context (generally a module) is
+        examined. Because the context is not yet loaded, plugins don't
+        get to know what the context is; so any context operations
+        should use a stack that is pushed in `beforeContext` and popped
+        in `afterContext` to ensure they operate symmetrically.
+
+        `beforeContext` and `afterContext` are mainly useful for tracking
+        and restoring global state around possible changes from within a
+        context, whatever the context may be. If you need to operate on
+        contexts themselves, see `startContext` and `stopContext`, which
+        are passed the context in question, but are called after
+        it has been loaded (imported in the module case).
+        """
+        pass
+    beforeContext._new = True
+
+    def beforeDirectory(self, path):
+        """Called before tests are loaded from directory at path.
+
+        :param path: the directory that is about to be processed
+        """
+        pass
+    beforeDirectory._new = True
+
+    def beforeImport(self, filename, module):
+        """Called before module is imported from filename.
+
+        :param filename: The file that will be loaded
+        :param module: The name of the module found in file
+        :type module: string
+        """
+    beforeImport._new = True
+
+    def beforeTest(self, test):
+        """Called before the test is run (before startTest).
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    beforeTest._new = True
+    def begin(self):
+        """Called before any tests are collected or run. Use this to
+        perform any setup needed before testing begins.
+        """
+        pass
+
+    def configure(self, options, conf):
+        """Called after the command line has been parsed, with the
+        parsed options and the config container. Here, implement any
+        config storage or changes to state or operation that are set
+        by command line options.
+
+        DO NOT return a value from this method unless you want to
+        stop all other plugins from being configured.
+        """
+        pass
+
+    def finalize(self, result):
+        """Called after all report output, including output from all
+        plugins, has been sent to the stream. Use this to print final
+        test results or perform final cleanup. Return None to allow
+        other plugins to continue printing, or any other value to stop
+        them.
+
+        :param result: test result object
+        
+        .. Note:: When tests are run under a test runner other than
+           :class:`nose.core.TextTestRunner`, such as
+           via ``python setup.py test``, this method may be called
+           **before** the default report output is sent.
+        """
+        pass
+
+    def describeTest(self, test):
+        """Return a test description.
+
+        Called by :meth:`nose.case.Test.shortDescription`.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    describeTest._new = True
+
+    def formatError(self, test, err):
+        """Called in result.addError, before plugin.addError. If you
+        want to replace or modify the error tuple, return a new error
+        tuple.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    formatError._new = True
+    formatError.chainable = True
+    # test arg is not chainable
+    formatError.static_args = (True, False)
+
+    def formatFailure(self, test, err):
+        """Called in result.addFailure, before plugin.addFailure. If you
+        want to replace or modify the error tuple, return a new error
+        tuple. Because this method is chainable, you must return the
+        test as well, so you'll return something like::
+
+          return (test, err)
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    formatFailure._new = True
+    formatFailure.chainable = True
+    # test arg is not chainable
+    formatFailure.static_args = (True, False)
+
+    def handleError(self, test, err):
+        """Called on addError. To handle the error yourself and prevent normal
+        error processing, return a true value.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    handleError._new = True
+
+    def handleFailure(self, test, err):
+        """Called on addFailure. To handle the failure yourself and
+        prevent normal failure processing, return a true value.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    handleFailure._new = True
+
+    def loadTestsFromDir(self, path):
+        """Return iterable of tests from a directory. May be a
+        generator.  Each item returned must be a runnable
+        unittest.TestCase (or subclass) instance or suite instance.
+        Return None if your plugin cannot collect any tests from
+        directory.
+
+        :param  path: The path to the directory.
+        """
+        pass
+    loadTestsFromDir.generative = True
+    loadTestsFromDir._new = True
+    
+    def loadTestsFromModule(self, module, path=None):
+        """Return iterable of tests in a module. May be a
+        generator. Each item returned must be a runnable
+        unittest.TestCase (or subclass) instance.
+        Return None if your plugin cannot
+        collect any tests from module.
+
+        :param module: The module object
+        :type module: python module
+        :param path: the path of the module to search, to distinguish from
+            namespace package modules
+
+            .. note::
+
+               NEW. The ``path`` parameter will only be passed by nose 0.11
+               or above.
+        """
+        pass
+    loadTestsFromModule.generative = True
+
+    def loadTestsFromName(self, name, module=None, importPath=None):
+        """Return tests in this file or module. Return None if you are not able
+        to load any tests, or an iterable if you are. May be a
+        generator.
+
+        :param name: The test name. May be a file or module name plus a test
+            callable. Use split_test_name to split into parts. Or it might
+            be some crazy name of your own devising, in which case, do
+            whatever you want.
+        :param module: Module from which the name is to be loaded
+        :param importPath: Path from which file (must be a python module) was
+            found
+
+            .. warning:: DEPRECATED: this argument will NOT be passed.
+        """
+        pass
+    loadTestsFromName.generative = True
+
+    def loadTestsFromNames(self, names, module=None):
+        """Return a tuple of (tests loaded, remaining names). Return
+        None if you are not able to load any tests. Multiple plugins
+        may implement loadTestsFromNames; the remaining name list from
+        each will be passed to the next as input.
+
+        :param names: List of test names.
+        :type names: iterable
+        :param module: Module from which the names are to be loaded
+        """
+        pass
+    loadTestsFromNames._new = True
+    loadTestsFromNames.chainable = True
+
+    def loadTestsFromFile(self, filename):
+        """Return tests in this file. Return None if you are not
+        interested in loading any tests, or an iterable if you are and
+        can load some. May be a generator. *If you are interested in
+        loading tests from the file and encounter no errors, but find
+        no tests, yield False or return [False].*
+
+        .. Note:: This method replaces loadTestsFromPath from the 0.9
+                  API.
+
+        :param filename: The full path to the file or directory.
+        """
+        pass
+    loadTestsFromFile.generative = True
+    loadTestsFromFile._new = True
+
+    def loadTestsFromPath(self, path):
+        """
+        .. warning:: DEPRECATED -- use loadTestsFromFile instead
+        """
+        pass
+    loadTestsFromPath.deprecated = True
+
+    def loadTestsFromTestCase(self, cls):
+        """Return tests in this test case class. Return None if you are
+        not able to load any tests, or an iterable if you are. May be a
+        generator.
+
+        :param cls: The test case class. Must be subclass of
+           :class:`unittest.TestCase`.
+        """
+        pass
+    loadTestsFromTestCase.generative = True
+
+    def loadTestsFromTestClass(self, cls):
+        """Return tests in this test class. Class will *not* be a
+        unittest.TestCase subclass. Return None if you are not able to
+        load any tests, an iterable if you are. May be a generator.
+
+        :param cls: The test case class. Must be **not** be subclass of
+           :class:`unittest.TestCase`.
+        """
+        pass
+    loadTestsFromTestClass._new = True
+    loadTestsFromTestClass.generative = True
+
+    def makeTest(self, obj, parent):
+        """Given an object and its parent, return or yield one or more
+        test cases. Each test must be a unittest.TestCase (or subclass)
+        instance. This is called before default test loading to allow
+        plugins to load an alternate test case or cases for an
+        object. May be a generator.
+
+        :param obj: The object to be made into a test
+        :param parent: The parent of obj (eg, for a method, the class)
+        """
+        pass
+    makeTest._new = True
+    makeTest.generative = True
+
+    def options(self, parser, env):
+        """Called to allow plugin to register command line
+        options with the parser.
+
+        DO NOT return a value from this method unless you want to stop
+        all other plugins from setting their options.
+
+        :param parser: options parser instance
+        :type parser: :class:`ConfigParser.ConfigParser`
+        :param env: environment, default is os.environ
+        """
+        pass
+    options._new = True
+
+    def prepareTest(self, test):
+        """Called before the test is run by the test runner. Please
+        note the article *the* in the previous sentence: prepareTest
+        is called *only once*, and is passed the test case or test
+        suite that the test runner will execute. It is *not* called
+        for each individual test case. If you return a non-None value,
+        that return value will be run as the test. Use this hook to
+        wrap or decorate the test with another function. If you need
+        to modify or wrap individual test cases, use `prepareTestCase`
+        instead.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+
+    def prepareTestCase(self, test):
+        """Prepare or wrap an individual test case. Called before
+        execution of the test. The test passed here is a
+        nose.case.Test instance; the case to be executed is in the
+        test attribute of the passed case. To modify the test to be
+        run, you should return a callable that takes one argument (the
+        test result object) -- it is recommended that you *do not*
+        side-effect the nose.case.Test instance you have been passed.
+
+        Keep in mind that when you replace the test callable you are
+        replacing the run() method of the test case -- including the
+        exception handling and result calls, etc.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    prepareTestCase._new = True
+    
+    def prepareTestLoader(self, loader):
+        """Called before tests are loaded. To replace the test loader,
+        return a test loader. To allow other plugins to process the
+        test loader, return None. Only one plugin may replace the test
+        loader. Only valid when using nose.TestProgram.
+
+        :param loader: :class:`nose.loader.TestLoader` 
+             (or other loader) instance
+        """
+        pass
+    prepareTestLoader._new = True
+
+    def prepareTestResult(self, result):
+        """Called before the first test is run. To use a different
+        test result handler for all tests than the given result,
+        return a test result handler. NOTE however that this handler
+        will only be seen by tests, that is, inside of the result
+        proxy system. The TestRunner and TestProgram -- whether nose's
+        or other -- will continue to see the original result
+        handler. For this reason, it is usually better to monkeypatch
+        the result (for instance, if you want to handle some
+        exceptions in a unique way). Only one plugin may replace the
+        result, but many may monkeypatch it. If you want to
+        monkeypatch and stop other plugins from doing so, monkeypatch
+        and return the patched result.
+
+        :param result: :class:`nose.result.TextTestResult` 
+             (or other result) instance
+        """
+        pass
+    prepareTestResult._new = True
+
+    def prepareTestRunner(self, runner):
+        """Called before tests are run. To replace the test runner,
+        return a test runner. To allow other plugins to process the
+        test runner, return None. Only valid when using nose.TestProgram.
+
+        :param runner: :class:`nose.core.TextTestRunner` 
+             (or other runner) instance
+        """
+        pass
+    prepareTestRunner._new = True
+        
+    def report(self, stream):
+        """Called after all error output has been printed. Print your
+        plugin's report to the provided stream. Return None to allow
+        other plugins to print reports, any other value to stop them.
+
+        :param stream: stream object; send your output here
+        :type stream: file-like object
+        """
+        pass
+
+    def setOutputStream(self, stream):
+        """Called before test output begins. To direct test output to a
+        new stream, return a stream object, which must implement a
+        `write(msg)` method. If you only want to note the stream, not
+        capture or redirect it, then return None.
+
+        :param stream: stream object; send your output here
+        :type stream: file-like object
+        """
+
+    def startContext(self, context):
+        """Called before context setup and the running of tests in the
+        context. Note that tests have already been *loaded* from the
+        context before this call.
+
+        :param context: the context about to be setup. May be a module or
+             class, or any other object that contains tests.
+        """
+        pass
+    startContext._new = True
+    
+    def startTest(self, test):
+        """Called before each test is run. DO NOT return a value unless
+        you want to stop other plugins from seeing the test start.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+
+    def stopContext(self, context):
+        """Called after the tests in a context have run and the
+        context has been torn down.
+
+        :param context: the context that has just been torn down. May be
+             a module or class, or any other object that contains tests.
+        """
+        pass
+    stopContext._new = True
+    
+    def stopTest(self, test):
+        """Called after each test is run. DO NOT return a value unless
+        you want to stop other plugins from seeing that the test has stopped.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+
+    def testName(self, test):
+        """Return a short test name. Called by `nose.case.Test.__str__`.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    testName._new = True
+
+    def wantClass(self, cls):
+        """Return true if you want the main test selector to collect
+        tests from this class, false if you don't, and None if you don't
+        care.
+
+        :param cls: The class being examined by the selector
+        """
+        pass
+    
+    def wantDirectory(self, dirname):
+        """Return true if you want test collection to descend into this
+        directory, false if you do not, and None if you don't care.
+
+        :param dirname: Full path to directory being examined by the selector
+        """
+        pass
+    
+    def wantFile(self, file):
+        """Return true if you want to collect tests from this file,
+        false if you do not and None if you don't care.
+
+        Change from 0.9: The optional package parameter is no longer passed.
+
+        :param file: Full path to file being examined by the selector
+        """
+        pass
+    
+    def wantFunction(self, function):
+        """Return true to collect this function as a test, false to
+        prevent it from being collected, and None if you don't care.
+
+        :param function: The function object being examined by the selector
+        """
+        pass
+    
+    def wantMethod(self, method):
+        """Return true to collect this method as a test, false to
+        prevent it from being collected, and None if you don't care.
+        
+        :param method: The method object being examined by the selector
+        :type method: unbound method
+        """    
+        pass
+    
+    def wantModule(self, module):
+        """Return true if you want collection to descend into this
+        module, false to prevent the collector from descending into the
+        module, and None if you don't care.
+
+        :param module: The module object being examined by the selector
+        :type module: python module
+        """
+        pass
+    
+    def wantModuleTests(self, module):
+        """
+        .. warning:: DEPRECATED -- this method will not be called, it has
+                     been folded into wantModule.
+        """
+        pass
+    wantModuleTests.deprecated = True
+    
diff --git a/nose/plugins/builtin.py b/nose/plugins/builtin.py
new file mode 100644 (file)
index 0000000..4fcc001
--- /dev/null
@@ -0,0 +1,34 @@
+"""
+Lists builtin plugins.
+"""
+plugins = []
+builtins = (
+    ('nose.plugins.attrib', 'AttributeSelector'),
+    ('nose.plugins.capture', 'Capture'),
+    ('nose.plugins.logcapture', 'LogCapture'),
+    ('nose.plugins.cover', 'Coverage'),
+    ('nose.plugins.debug', 'Pdb'),
+    ('nose.plugins.deprecated', 'Deprecated'),
+    ('nose.plugins.doctests', 'Doctest'),
+    ('nose.plugins.isolate', 'IsolationPlugin'),
+    ('nose.plugins.failuredetail', 'FailureDetail'),
+    ('nose.plugins.prof', 'Profile'),
+    ('nose.plugins.skip', 'Skip'),
+    ('nose.plugins.testid', 'TestId'),
+    ('nose.plugins.multiprocess', 'MultiProcess'),
+    ('nose.plugins.xunit', 'Xunit'),
+    ('nose.plugins.allmodules', 'AllModules'),
+    ('nose.plugins.collect', 'CollectOnly'),
+    )
+
+for module, cls in builtins:
+    try:
+        plugmod = __import__(module, globals(), locals(), [cls])
+    except KeyboardInterrupt:
+        raise
+    except:
+        continue
+    plug = getattr(plugmod, cls)
+    plugins.append(plug)
+    globals()[cls] = plug
+
diff --git a/nose/plugins/capture.py b/nose/plugins/capture.py
new file mode 100644 (file)
index 0000000..911215d
--- /dev/null
@@ -0,0 +1,128 @@
+"""
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
+the options ``-s`` or ``--nocapture``.
+
+:Options:
+  ``--nocapture``
+    Don't capture stdout (any stdout output will be printed immediately)
+
+"""
+import logging
+import os
+import sys
+from nose.plugins.base import Plugin
+from nose.util import ln
+from StringIO import StringIO
+
+
+log = logging.getLogger(__name__)
+
+class Capture(Plugin):
+    """
+    Output capture plugin. Enabled by default. Disable with ``-s`` or
+    ``--nocapture``. This plugin captures stdout during test execution,
+    appending any output captured to the error or failure output,
+    should the test fail or raise an error.
+    """
+    enabled = True
+    env_opt = 'NOSE_NOCAPTURE'
+    name = 'capture'
+    score = 500
+
+    def __init__(self):
+        # Stack of saved stdout streams: start() pushes the current
+        # sys.stdout here and end() pops it back, so captures may nest.
+        self.stdout = []
+        # Active capture buffer (a StringIO), or None between tests.
+        self._buf = None
+
+    def options(self, parser, env):
+        """Register commandline options
+        """
+        parser.add_option(
+            "-s", "--nocapture", action="store_false",
+            default=not env.get(self.env_opt), dest="capture",
+            help="Don't capture stdout (any stdout output "
+            "will be printed immediately) [NOSE_NOCAPTURE]")
+
+    def configure(self, options, conf):
+        """Configure plugin. Plugin is enabled by default.
+        """
+        self.conf = conf
+        if not options.capture:
+            self.enabled = False
+
+    def afterTest(self, test):
+        """Clear capture buffer.
+        """
+        self.end()
+        self._buf = None
+
+    def begin(self):
+        """Replace sys.stdout with capture buffer.
+        """
+        self.start() # get an early handle on sys.stdout
+
+    def beforeTest(self, test):
+        """Flush capture buffer.
+        """
+        self.start()
+
+    def formatError(self, test, err):
+        """Add captured output to error report.
+        """
+        # Stash the captured text on the test object so other plugins
+        # can read it, then reset the buffer for the next test.
+        test.capturedOutput = output = self.buffer
+        self._buf = None
+        if not output:
+            # Don't return None as that will prevent other
+            # formatters from formatting and remove earlier formatters
+            # formats, instead return the err we got
+            return err
+        ec, ev, tb = err
+        return (ec, self.addCaptureToErr(ev, output), tb)
+
+    def formatFailure(self, test, err):
+        """Add captured output to failure report.
+        """
+        return self.formatError(test, err)
+
+    def addCaptureToErr(self, ev, output):
+        # Flatten the exception value to unicode (handling the API
+        # differences across python 2.x versions), then append the
+        # captured stdout between banner lines.
+        if isinstance(ev, Exception):
+            if hasattr(ev, '__unicode__'):
+                # 2.6+
+                ev = unicode(ev)
+            else:
+                # 2.5-
+                if not hasattr(ev, 'message'):
+                    # 2.4
+                    msg = len(ev.args) and ev.args[0] or ''
+                else:
+                    msg = ev.message
+                if not isinstance(msg, unicode):
+                    msg = msg.decode('utf8', 'replace')
+                ev = u'%s: %s' % (ev.__class__.__name__, msg)
+        if not isinstance(output, unicode):
+            output = output.decode('utf8', 'replace')
+        return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
+                           output, ln(u'>> end captured stdout <<')])
+
+    def start(self):
+        # Save the real stdout and swap in a fresh capture buffer.
+        self.stdout.append(sys.stdout)
+        self._buf = StringIO()
+        sys.stdout = self._buf
+
+    def end(self):
+        # Restore the most recently saved stdout, if any.
+        if self.stdout:
+            sys.stdout = self.stdout.pop()
+
+    def finalize(self, result):
+        """Restore stdout.
+        """
+        # Unwind any streams still stacked (eg if a test left one behind).
+        while self.stdout:
+            self.end()
+
+    def _get_buffer(self):
+        # Text captured so far, or None when capture is inactive.
+        if self._buf is not None:
+            return self._buf.getvalue()
+
+    buffer = property(_get_buffer, None, None,
+                      """Captured stdout output.""")
diff --git a/nose/plugins/collect.py b/nose/plugins/collect.py
new file mode 100644 (file)
index 0000000..6f9f0fa
--- /dev/null
@@ -0,0 +1,94 @@
+"""
+This plugin bypasses the actual execution of tests, and instead just collects
+test names. Fixtures are also bypassed, so running nosetests with the 
+collection plugin enabled should be very quick.
+
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
+
+This plugin is also useful for counting tests in a test suite, and making
+people watching your demo think all of your tests pass.
+"""
+from nose.plugins.base import Plugin
+from nose.case import Test
+import logging
+import unittest
+
+log = logging.getLogger(__name__)
+
+
+class CollectOnly(Plugin):
+    """
+    Collect and output test names only, don't run any tests.
+    """
+    name = "collect-only"
+    enableOpt = 'collect_only'
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option('--collect-only',
+                          action='store_true',
+                          dest=self.enableOpt,
+                          default=env.get('NOSE_COLLECT_ONLY'),
+                          help="Enable collect-only: %s [COLLECT_ONLY]" %
+                          (self.help()))
+
+    def prepareTestLoader(self, loader):
+        """Install collect-only suite class in TestLoader.
+        """
+        # Disable context awareness
+        log.debug("Preparing test loader")
+        loader.suiteClass = TestSuiteFactory(self.conf)
+
+    def prepareTestCase(self, test):
+        """Replace actual test with dummy that always passes.
+        """
+        # Return something that always passes
+        log.debug("Preparing test case %s", test)
+        if not isinstance(test, Test):
+            return
+        def run(result):
+            # We need to make these plugin calls because there won't be
+            # a result proxy, due to using a stripped-down test suite
+            self.conf.plugins.startTest(test)
+            result.startTest(test)
+            self.conf.plugins.addSuccess(test)
+            result.addSuccess(test)
+            self.conf.plugins.stopTest(test)
+            result.stopTest(test)
+        return run
+
+
+class TestSuiteFactory:
+    """
+    Factory for producing configured test suites.
+    """
+    def __init__(self, conf):
+        self.conf = conf
+
+    def __call__(self, tests=(), **kw):
+        return TestSuite(tests, conf=self.conf)
+
+
+class TestSuite(unittest.TestSuite):
+    """
+    Basic test suite that bypasses most proxy and plugin calls, but does
+    wrap tests in a nose.case.Test so prepareTestCase will be called.
+    """
+    def __init__(self, tests=(), conf=None):
+        self.conf = conf
+        # Exec lazy suites: makes discovery depth-first
+        if callable(tests):
+            tests = tests()
+        log.debug("TestSuite(%r)", tests)
+        unittest.TestSuite.__init__(self, tests)
+
+    def addTest(self, test):
+        log.debug("Add test %s", test)
+        # Sub-suites pass through untouched; bare cases are wrapped in
+        # nose.case.Test so the prepareTestCase hook fires for them.
+        if isinstance(test, unittest.TestSuite):
+            self._tests.append(test)
+        else:
+            self._tests.append(Test(test, config=self.conf))
+
diff --git a/nose/plugins/cover.py b/nose/plugins/cover.py
new file mode 100644 (file)
index 0000000..f10b198
--- /dev/null
@@ -0,0 +1,308 @@
+"""If you have Ned Batchelder's coverage_ module installed, you may activate a
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
+environment variable. The coverage report will cover any python source module
+imported after the start of the test run, excluding modules that match
+testMatch. If you want to include those modules too, use the ``--cover-tests``
+switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
+restrict the coverage report to modules from a particular package or packages,
+use the ``--cover-packages`` switch or the NOSE_COVER_PACKAGES environment
+variable.
+
+.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
+"""
+import logging
+import os
+import re
+import sys
+from nose.plugins.base import Plugin
+from nose.util import src, tolist
+
+log =  logging.getLogger(__name__)
+
+# Page skeleton for the per-module annotated source pages written by
+# Coverage.report_html(); %(title)s/%(header)s/%(stats)s/%(body)s are
+# filled in by Coverage.htmlAnnotate().
+COVERAGE_TEMPLATE = '''<html>
+<head>
+%(title)s
+</head>
+<body>
+%(header)s
+<style>
+.coverage pre {float: left; margin: 0px 1em; border: none;
+               padding: 0px; }
+.num pre { margin: 0px }
+.nocov, .nocov pre {background-color: #faa}
+.cov, .cov pre {background-color: #cfc}
+div.coverage div { clear: both; height: 1.1em}
+</style>
+<div class="stats">
+%(stats)s
+</div>
+<div class="coverage">
+%(body)s
+</div>
+</body>
+</html>
+'''
+
+# Summary fragment (covered/missed/skipped/percent) shared by the HTML
+# index page and each per-module page.
+COVERAGE_STATS_TEMPLATE = '''Covered: %(covered)s lines<br/>
+Missed: %(missed)s lines<br/>
+Skipped %(skipped)s lines<br/>
+Percent: %(percent)s %%<br/>
+'''
+
+
+class Coverage(Plugin):
+    """
+    Activate a coverage report using Ned Batchelder's coverage module.
+    """
+    coverTests = False
+    coverPackages = None
+    _coverInstance = None
+    score = 200
+    status = {}
+
+    def coverInstance(self):
+        if not self._coverInstance:
+            import coverage
+            try:
+                self._coverInstance = coverage.coverage()
+            except coverage.CoverageException:
+                self._coverInstance = coverage
+        return self._coverInstance
+    coverInstance = property(coverInstance)
+
+    def options(self, parser, env):
+        """
+        Add options to command line.
+        """
+        Plugin.options(self, parser, env)
+        parser.add_option("--cover-package", action="append",
+                          default=env.get('NOSE_COVER_PACKAGE'),
+                          metavar="PACKAGE",
+                          dest="cover_packages",
+                          help="Restrict coverage output to selected packages "
+                          "[NOSE_COVER_PACKAGE]")
+        parser.add_option("--cover-erase", action="store_true",
+                          default=env.get('NOSE_COVER_ERASE'),
+                          dest="cover_erase",
+                          help="Erase previously collected coverage "
+                          "statistics before run")
+        parser.add_option("--cover-tests", action="store_true",
+                          dest="cover_tests",
+                          default=env.get('NOSE_COVER_TESTS'),
+                          help="Include test modules in coverage report "
+                          "[NOSE_COVER_TESTS]")
+        parser.add_option("--cover-inclusive", action="store_true",
+                          dest="cover_inclusive",
+                          default=env.get('NOSE_COVER_INCLUSIVE'),
+                          help="Include all python files under working "
+                          "directory in coverage report.  Useful for "
+                          "discovering holes in test coverage if not all "
+                          "files are imported by the test suite. "
+                          "[NOSE_COVER_INCLUSIVE]")
+        parser.add_option("--cover-html", action="store_true",
+                          default=env.get('NOSE_COVER_HTML'),
+                          dest='cover_html',
+                          help="Produce HTML coverage information")
+        parser.add_option('--cover-html-dir', action='store',
+                          default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
+                          dest='cover_html_dir',
+                          metavar='DIR',
+                          help='Produce HTML coverage information in dir')
+
+    def configure(self, options, config):
+        """
+        Configure plugin.
+        """
+        try:
+            self.status.pop('active')
+        except KeyError:
+            pass
+        Plugin.configure(self, options, config)
+        if config.worker:
+            return
+        if self.enabled:
+            try:
+                import coverage
+            except ImportError:
+                log.error("Coverage not available: "
+                          "unable to import coverage module")
+                self.enabled = False
+                return
+        self.conf = config
+        self.coverErase = options.cover_erase
+        self.coverTests = options.cover_tests
+        self.coverPackages = []
+        if options.cover_packages:
+            for pkgs in [tolist(x) for x in options.cover_packages]:
+                self.coverPackages.extend(pkgs)
+        self.coverInclusive = options.cover_inclusive
+        if self.coverPackages:
+            log.info("Coverage report will include only packages: %s",
+                     self.coverPackages)
+        self.coverHtmlDir = None
+        if options.cover_html:
+            self.coverHtmlDir = options.cover_html_dir
+            log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
+        if self.enabled:
+            self.status['active'] = True
+
+    def begin(self):
+        """
+        Begin recording coverage information.
+        """
+        log.debug("Coverage begin")
+        self.skipModules = sys.modules.keys()[:]
+        if self.coverErase:
+            log.debug("Clearing previously collected coverage statistics")
+            self.coverInstance.erase()
+        self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
+        self.coverInstance.start()
+
+    def report(self, stream):
+        """
+        Output code coverage report.
+        """
+        log.debug("Coverage report")
+        self.coverInstance.stop()
+        self.coverInstance.save()
+        modules = [ module
+                    for name, module in sys.modules.items()
+                    if self.wantModuleCoverage(name, module) ]
+        log.debug("Coverage report will cover modules: %s", modules)
+        self.coverInstance.report(modules, file=stream)
+        if self.coverHtmlDir:
+            log.debug("Generating HTML coverage report")
+            if hasattr(self.coverInstance, 'html_report'):
+                self.coverInstance.html_report(modules, self.coverHtmlDir)
+            else:
+                self.report_html(modules)
+
+    def report_html(self, modules):
+        if not os.path.exists(self.coverHtmlDir):
+            os.makedirs(self.coverHtmlDir)
+        files = {}
+        for m in modules:
+            if hasattr(m, '__name__') and hasattr(m, '__file__'):
+                files[m.__name__] = m.__file__
+        self.coverInstance.annotate(files.values())
+        global_stats =  {'covered': 0, 'missed': 0, 'skipped': 0}
+        file_list = []
+        for m, f in files.iteritems():
+            if f.endswith('pyc'):
+                f = f[:-1]
+            coverfile = f+',cover'
+            outfile, stats = self.htmlAnnotate(m, f, coverfile,
+                                               self.coverHtmlDir)
+            for field in ('covered', 'missed', 'skipped'):
+                global_stats[field] += stats[field]
+            file_list.append((stats['percent'], m, outfile, stats))
+            os.unlink(coverfile)
+        file_list.sort()
+        global_stats['percent'] = self.computePercent(
+            global_stats['covered'], global_stats['missed'])
+        # Now write out an index file for the coverage HTML
+        index = open(os.path.join(self.coverHtmlDir, 'index.html'), 'w')
+        index.write('<html><head><title>Coverage Index</title></head>'
+                    '<body><p>')
+        index.write(COVERAGE_STATS_TEMPLATE % global_stats)
+        index.write('<table><tr><td>File</td><td>Covered</td><td>Missed'
+                    '</td><td>Skipped</td><td>Percent</td></tr>')
+        for junk, name, outfile, stats in file_list:
+            stats['a'] = '<a href="%s">%s</a>' % (outfile, name)
+            index.write('<tr><td>%(a)s</td><td>%(covered)s</td><td>'
+                        '%(missed)s</td><td>%(skipped)s</td><td>'
+                        '%(percent)s %%</td></tr>' % stats)
+        index.write('</table></p></html')
+        index.close()
+
+    def htmlAnnotate(self, name, file, coverfile, outputDir):
+        log.debug('Name: %s file: %s' % (name, file, ))
+        rows = []
+        data = open(coverfile, 'r').read().split('\n')
+        padding = len(str(len(data)))
+        stats = {'covered': 0, 'missed': 0, 'skipped': 0}
+        for lineno, line in enumerate(data):
+            lineno += 1
+            if line:
+                status = line[0]
+                line = line[2:]
+            else:
+                status = ''
+                line = ''
+            lineno = (' ' * (padding - len(str(lineno)))) + str(lineno)
+            for old, new in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
+                             ('"', '&quot;'), ):
+                line = line.replace(old, new)
+            if status == '!':
+                rows.append('<div class="nocov"><span class="num"><pre>'
+                            '%s</pre></span><pre>%s</pre></div>' % (lineno,
+                                                                    line))
+                stats['missed'] += 1
+            elif status == '>':
+                rows.append('<div class="cov"><span class="num"><pre>%s</pre>'
+                            '</span><pre>%s</pre></div>' % (lineno, line))
+                stats['covered'] += 1
+            else:
+                rows.append('<div class="skip"><span class="num"><pre>%s</pre>'
+                            '</span><pre>%s</pre></div>' % (lineno, line))
+                stats['skipped'] += 1
+        stats['percent'] = self.computePercent(stats['covered'],
+                                               stats['missed'])
+        html = COVERAGE_TEMPLATE % {'title': '<title>%s</title>' % name,
+                                    'header': name,
+                                    'body': '\n'.join(rows),
+                                    'stats': COVERAGE_STATS_TEMPLATE % stats,
+                                   }
+        outfilename = name + '.html'
+        outfile = open(os.path.join(outputDir, outfilename), 'w')
+        outfile.write(html)
+        outfile.close()
+        return outfilename, stats
+
+    def computePercent(self, covered, missed):
+        if covered + missed == 0:
+            percent = 1
+        else:
+            percent = covered/(covered+missed+0.0)
+        return int(percent * 100)
+
+    def wantModuleCoverage(self, name, module):
+        if not hasattr(module, '__file__'):
+            log.debug("no coverage of %s: no __file__", name)
+            return False
+        module_file = src(module.__file__)
+        if not module_file or not module_file.endswith('.py'):
+            log.debug("no coverage of %s: not a python file", name)
+            return False
+        if self.coverPackages:
+            for package in self.coverPackages:
+                if (re.findall(r'^%s\b' % re.escape(package), name)
+                    and (self.coverTests
+                         or not self.conf.testMatch.search(name))):
+                    log.debug("coverage for %s", name)
+                    return True
+        if name in self.skipModules:
+            log.debug("no coverage for %s: loaded before coverage start",
+                      name)
+            return False
+        if self.conf.testMatch.search(name) and not self.coverTests:
+            log.debug("no coverage for %s: is a test", name)
+            return False
+        # accept any package that passed the previous tests, unless
+        # coverPackages is on -- in that case, if we wanted this
+        # module, we would have already returned True
+        return not self.coverPackages
+
+    def wantFile(self, file, package=None):
+        """If inclusive coverage enabled, return true for all source files
+        in wanted packages.
+        """
+        if self.coverInclusive:
+            if file.endswith(".py"):
+                if package and self.coverPackages:
+                    for want in self.coverPackages:
+                        if package.startswith(want):
+                            return True
+                else:
+                    return True
+        return None
diff --git a/nose/plugins/debug.py b/nose/plugins/debug.py
new file mode 100644 (file)
index 0000000..c7fc462
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
+option will drop the test runner into pdb when it encounters an error. To
+drop into pdb on failure, use ``--pdb-failures``.
+"""
+
+import pdb
+from nose.plugins.base import Plugin
+
+class Pdb(Plugin):
+    """
+    Provides --pdb and --pdb-failures options that cause the test runner to
+    drop into pdb if it encounters an error or failure, respectively.
+    """
+    enabled_for_errors = False
+    enabled_for_failures = False
+    score = 5 # run last, among builtins
+    
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option(
+            "--pdb", action="store_true", dest="debugErrors",
+            default=env.get('NOSE_PDB', False),
+            help="Drop into debugger on errors")
+        parser.add_option(
+            "--pdb-failures", action="store_true",
+            dest="debugFailures",
+            default=env.get('NOSE_PDB_FAILURES', False),
+            help="Drop into debugger on failures")
+
+    def configure(self, options, conf):
+        """Configure which kinds of exceptions trigger plugin.
+        """
+        self.conf = conf
+        self.enabled = options.debugErrors or options.debugFailures
+        self.enabled_for_errors = options.debugErrors
+        self.enabled_for_failures = options.debugFailures
+
+    def addError(self, test, err):
+        """Enter pdb if configured to debug errors.
+        """
+        if not self.enabled_for_errors:
+            return
+        self.debug(err)
+
+    def addFailure(self, test, err):
+        """Enter pdb if configured to debug failures.
+        """
+        if not self.enabled_for_failures:
+            return
+        self.debug(err)
+
+    def debug(self, err):
+        import sys # FIXME why is this import here?
+        ec, ev, tb = err
+        stdout = sys.stdout
+        sys.stdout = sys.__stdout__
+        try:
+            pdb.post_mortem(tb)
+        finally:
+            sys.stdout = stdout
diff --git a/nose/plugins/deprecated.py b/nose/plugins/deprecated.py
new file mode 100644 (file)
index 0000000..461a26b
--- /dev/null
@@ -0,0 +1,45 @@
+"""
+This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+class DeprecatedTest(Exception):
+    """Raise this exception to mark a test as deprecated.
+    """
+    # No body needed: the Deprecated plugin in this module recognizes
+    # the exception type itself.
+    pass
+
+
+class Deprecated(ErrorClassPlugin):
+    """
+    Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+    by default.
+    """
+    enabled = True
+    deprecated = ErrorClass(DeprecatedTest,
+                            label='DEPRECATED',
+                            isfailure=False)
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        env_opt = 'NOSE_WITHOUT_DEPRECATED'
+        parser.add_option('--no-deprecated', action='store_true',
+                          dest='noDeprecated', default=env.get(env_opt, False),
+                          help="Disable special handling of DeprecatedTest "
+                          "exceptions.")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        disable = getattr(options, 'noDeprecated', False)
+        if disable:
+            self.enabled = False
diff --git a/nose/plugins/doctests.py b/nose/plugins/doctests.py
new file mode 100644 (file)
index 0000000..f07f641
--- /dev/null
@@ -0,0 +1,428 @@
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of :mod:`doctests
+<doctest>`.  Because doctests are usually included in the tested package
+(instead of being grouped into packages or modules of their own), nose only
+looks for them in the non-test packages it discovers in the working directory.
+
+Doctests may also be placed into files other than python modules, in which
+case they can be collected and executed by using the ``--doctest-extension``
+switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
+extension(s) to load.
+
When loading doctests from non-module files, use the ``--doctest-fixtures``
switch to specify how to find modules containing fixtures for the tests. A
module name will be produced by appending the value of that switch to the base
name of each doctest file loaded. For example, a doctest file "widgets.rst"
with the switch ``--doctest-fixtures=_fixt`` will load fixtures from the module
``widgets_fixt.py``.
+
+A fixtures module may define any or all of the following functions:
+
+* setup([module]) or setup_module([module])
+   
+  Called before the test runs. You may raise SkipTest to skip all tests.
+  
+* teardown([module]) or teardown_module([module])
+
+  Called after the test runs, if setup/setup_module did not raise an
+  unhandled exception.
+
+* setup_test(test)
+
+  Called before the test. NOTE: the argument passed is a
+  doctest.DocTest instance, *not* a unittest.TestCase.
+  
* teardown_test(test)

  Called after the test, if setup_test did not raise an exception. NOTE: the
  argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
+  
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
+
+.. note ::
+
+   See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
+   additional documentation and examples.
+
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+from inspect import getmodule
+from nose.plugins.base import Plugin
+from nose.suite import ContextList
+from nose.util import anyp, getpackage, test_address, resolve_name, \
+     src, tolist, isproperty
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+import sys
+import __builtin__ as builtin_mod
+
+log = logging.getLogger(__name__)
+
+try:
+    import doctest
+    doctest.DocTestCase
+    # system version of doctest is acceptable, but needs a monkeypatch
+except (ImportError, AttributeError):
+    # system version is too old
+    import nose.ext.dtcompat as doctest
+
+
+#
+# Doctest and coverage don't get along, so we need to create
+# a monkeypatch that will replace the part of doctest that
+# interferes with coverage reports.
+#
+# The monkeypatch is based on this zope patch:
+# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
+#
# Keep a handle on the stock class so the subclass below can delegate
# to the original implementations.
_orp = doctest._OutputRedirectingPdb

class NoseOutputRedirectingPdb(_orp):
    """doctest debugger that only resumes normal tracing if the debugger
    was actually entered, so that an unconditional sys.settrace(None)
    (from Bdb.set_continue) cannot clobber coverage's trace function.
    """
    def __init__(self, out):
        # Name-mangled flag; flipped only when set_trace() is called.
        self.__debugger_used = False
        _orp.__init__(self, out)

    def set_trace(self):
        self.__debugger_used = True
        # Start debugging in the caller's frame, not this wrapper's.
        _orp.set_trace(self, sys._getframe().f_back)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            _orp.set_continue(self)
doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
+
+
class DoctestSuite(unittest.TestSuite):
    """Suite wrapper for doctests loaded from a single module or file.

    Doctests may be attached to objects that have no individual address
    (properties, for example), so these suites are parallelizable only
    at the module or file level. This subclass is used when loading
    doctests from a module to ensure that behavior.

    It is used only if the plugin is not fully prepared; in normal use,
    the loader's suiteClass is used instead.
    """
    can_split = False

    def __init__(self, tests=(), context=None, can_split=False):
        unittest.TestSuite.__init__(self, tests=tests)
        self.context = context
        self.can_split = can_split

    def address(self):
        """Address of the module/file this suite's tests came from."""
        return test_address(self.context)

    def __iter__(self):
        # Explicit __iter__ kept for Python 2.3 compatibility.
        return iter(self._tests)

    def __str__(self):
        return str(self._tests)
+
+        
class Doctest(Plugin):
    """
    Activate doctest plugin to find and run doctests in non-test modules.
    """
    # File extensions (beyond .py) to scan for doctests; set in configure().
    extension = None
    # Replaced by the loader's suiteClass in prepareTestLoader().
    suiteClass = DoctestSuite

    def options(self, parser, env):
        """Register commandline options.
        """
        Plugin.options(self, parser, env)
        parser.add_option('--doctest-tests', action='store_true',
                          dest='doctest_tests',
                          default=env.get('NOSE_DOCTEST_TESTS'),
                          help="Also look for doctests in test modules. "
                          "Note that classes, methods and functions should "
                          "have either doctests or non-doctest tests, "
                          "not both. [NOSE_DOCTEST_TESTS]")
        parser.add_option('--doctest-extension', action="append",
                          dest="doctestExtension",
                          metavar="EXT",
                          help="Also look for doctests in files with "
                          "this extension [NOSE_DOCTEST_EXTENSION]")
        parser.add_option('--doctest-result-variable',
                          dest='doctest_result_var',
                          default=env.get('NOSE_DOCTEST_RESULT_VAR'),
                          metavar="VAR",
                          help="Change the variable name set to the result of "
                          "the last interpreter command from the default '_'. "
                          "Can be used to avoid conflicts with the _() "
                          "function used for text translation. "
                          "[NOSE_DOCTEST_RESULT_VAR]")
        parser.add_option('--doctest-fixtures', action="store",
                          dest="doctestFixtures",
                          metavar="SUFFIX",
                          help="Find fixtures for a doctest file in module "
                          "with this name appended to the base name "
                          "of the doctest file")
        # Set the default as a list, if given in env; otherwise
        # an additional value set on the command line will cause
        # an error.
        env_setting = env.get('NOSE_DOCTEST_EXTENSION')
        if env_setting is not None:
            parser.set_defaults(doctestExtension=tolist(env_setting))

    def configure(self, options, config):
        """Configure plugin.
        """
        Plugin.configure(self, options, config)
        self.doctest_result_var = options.doctest_result_var
        self.doctest_tests = options.doctest_tests
        self.extension = tolist(options.doctestExtension)
        self.fixtures = options.doctestFixtures
        self.finder = doctest.DocTestFinder()

    def prepareTestLoader(self, loader):
        """Capture loader's suiteClass.

        This is used to create test suites from doctest files.

        """
        self.suiteClass = loader.suiteClass

    def loadTestsFromModule(self, module):
        """Load doctests from the module.

        Generator: yields a single suite containing all doctest cases
        found in the module, or nothing if the module is not wanted or
        contains no doctests.
        """
        log.debug("loading from %s", module)
        if not self.matches(module.__name__):
            log.debug("Doctest doesn't want module %s", module)
            return
        try:
            tests = self.finder.find(module)
        except AttributeError:
            log.exception("Attribute error loading from %s", module)
            # nose allows module.__test__ = False; doctest does not and throws
            # AttributeError
            return
        if not tests:
            log.debug("No tests found in %s", module)
            return
        tests.sort()
        module_file = src(module.__file__)
        # FIXME this breaks the id plugin somehow (tests probably don't
        # get wrapped in result proxy or something)
        cases = []
        for test in tests:
            if not test.examples:
                # Skip doctests with no examples (e.g. empty docstrings).
                continue
            if not test.filename:
                test.filename = module_file
            cases.append(DocTestCase(test, result_var=self.doctest_result_var))
        if cases:
            # One suite per module; doctests are not individually splittable.
            yield self.suiteClass(cases, context=module, can_split=False)

    def loadTestsFromFile(self, filename):
        """Load doctests from the file.

        Tests are loaded only if filename's extension matches
        configured doctest extension.

        """
        if self.extension and anyp(filename.endswith, self.extension):
            name = os.path.basename(filename)
            dh = open(filename)
            try:
                doc = dh.read()
            finally:
                dh.close()

            fixture_context = None
            globs = {'__file__': filename}
            if self.fixtures:
                # Fixture module name: doctest file base name + configured
                # suffix; the file's directory is added to sys.path so it
                # can be imported.
                base, ext = os.path.splitext(name)
                dirname = os.path.dirname(filename)
                sys.path.append(dirname)
                fixt_mod = base + self.fixtures
                try:
                    fixture_context = __import__(
                        fixt_mod, globals(), locals(), ["nop"])
                except ImportError, e:
                    log.debug(
                        "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
                log.debug("Fixture module %s resolved to %s",
                          fixt_mod, fixture_context)
                if hasattr(fixture_context, 'globs'):
                    # Fixture module may transform the globals dict.
                    globs = fixture_context.globs(globs)
            parser = doctest.DocTestParser()
            test = parser.get_doctest(
                doc, globs=globs, name=name,
                filename=filename, lineno=0)
            if test.examples:
                case = DocFileCase(
                    test,
                    setUp=getattr(fixture_context, 'setup_test', None),
                    tearDown=getattr(fixture_context, 'teardown_test', None),
                    result_var=self.doctest_result_var)
                if fixture_context:
                    # Wrap in a ContextList so module-level fixtures run.
                    yield ContextList((case,), context=fixture_context)
                else:
                    yield case
            else:
                yield False # no tests to load

    def makeTest(self, obj, parent):
        """Look for doctests in the given object, which will be a
        function, method or class.
        """
        name = getattr(obj, '__name__', 'Unnammed %s' % type(obj))
        doctests = self.finder.find(obj, module=getmodule(parent), name=name)
        if doctests:
            for test in doctests:
                if len(test.examples) == 0:
                    continue
                # Pass obj as the address hint for DocTestCase.address().
                yield DocTestCase(test, obj=obj,
                                  result_var=self.doctest_result_var)

    def matches(self, name):
        """Return true if doctests may be loaded from the named module:
        modules that look like test modules are skipped unless
        --doctest-tests is in effect.
        """
        # FIXME this seems wrong -- nothing is ever going to
        # fail this test, since we're given a module NAME not FILE
        if name == '__init__.py':
            return False
        # FIXME don't think we need include/exclude checks here?
        # NOTE(review): truth-testing filter() results is Python 2 only;
        # on Python 3 filter() returns an always-truthy iterator.
        return ((self.doctest_tests or not self.conf.testMatch.search(name)
                 or (self.conf.include
                     and filter(None,
                                [inc.search(name)
                                 for inc in self.conf.include])))
                and (not self.conf.exclude
                     or not filter(None,
                                   [exc.search(name)
                                    for exc in self.conf.exclude])))

    def wantFile(self, file):
        """Override to select all modules and any file ending with
        configured doctest extension.
        """
        # always want .py files
        if file.endswith('.py'):
            return True
        # also want files that match my extension
        if (self.extension
            and anyp(file.endswith, self.extension)
            and (not self.conf.exclude
                 or not filter(None,
                               [exc.search(file)
                                for exc in self.conf.exclude]))):
            return True
        return None
+
+
+class DocTestCase(doctest.DocTestCase):
+    """Overrides DocTestCase to
+    provide an address() method that returns the correct address for
+    the doctest case. To provide hints for address(), an obj may also
+    be passed -- this will be used as the test object for purposes of
+    determining the test address, if it is provided.
+    """
+    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+                 checker=None, obj=None, result_var='_'):
+        self._result_var = result_var
+        self._nose_obj = obj
+        super(DocTestCase, self).__init__(
+            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+            checker=checker)
+    
+    def address(self):
+        if self._nose_obj is not None:
+            return test_address(self._nose_obj)
+        obj = resolve_name(self._dt_test.name)
+
+        if isproperty(obj):
+            # properties have no connection to the class they are in
+            # so we can't just look 'em up, we have to first look up
+            # the class, then stick the prop on the end
+            parts = self._dt_test.name.split('.')
+            class_name = '.'.join(parts[:-1])
+            cls = resolve_name(class_name)
+            base_addr = test_address(cls)
+            return (base_addr[0], base_addr[1],
+                    '.'.join([base_addr[2], parts[-1]]))
+        else:
+            return test_address(obj)
+    
+    # doctests loaded via find(obj) omit the module name
+    # so we need to override id, __repr__ and shortDescription
+    # bonus: this will squash a 2.3 vs 2.4 incompatiblity
+    def id(self):
+        name = self._dt_test.name
+        filename = self._dt_test.filename
+        if filename is not None:
+            pk = getpackage(filename)
+            if not name.startswith(pk):
+                name = "%s.%s" % (pk, name)
+        return name
+    
+    def __repr__(self):
+        name = self.id()
+        name = name.split('.')
+        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+    __str__ = __repr__
+
+    def shortDescription(self):
+        return 'Doctest: %s' % self.id()
+
+    def setUp(self):
+        if self._result_var is not None:
+            self._old_displayhook = sys.displayhook
+            sys.displayhook = self._displayhook
+        super(DocTestCase, self).setUp()
+
+    def _displayhook(self, value):
+        if value is None:
+            return
+        setattr(builtin_mod, self._result_var,  value)
+        print repr(value)
+
+    def tearDown(self):
+        super(DocTestCase, self).tearDown()
+        if self._result_var is not None:
+            sys.displayhook = self._old_displayhook
+            delattr(builtin_mod, self._result_var)
+
+
+class DocFileCase(doctest.DocFileCase):
+    """Overrides to provide address() method that returns the correct
+    address for the doc file case.
+    """
+    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+                 checker=None, result_var='_'):
+        self._result_var = result_var
+        super(DocFileCase, self).__init__(
+            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+            checker=None)
+
+    def address(self):
+        return (self._dt_test.filename, None, None)
+
+    def setUp(self):
+        if self._result_var is not None:
+            self._old_displayhook = sys.displayhook
+            sys.displayhook = self._displayhook
+        super(DocFileCase, self).setUp()
+
+    def _displayhook(self, value):
+        if value is None:
+            return
+        setattr(builtin_mod, self._result_var, value)
+        print repr(value)
+
+    def tearDown(self):
+        super(DocFileCase, self).tearDown()
+        if self._result_var is not None:
+            sys.displayhook = self._old_displayhook
+            delattr(builtin_mod, self._result_var)
diff --git a/nose/plugins/errorclass.py b/nose/plugins/errorclass.py
new file mode 100644 (file)
index 0000000..663dffc
--- /dev/null
@@ -0,0 +1,210 @@
+"""
+ErrorClass Plugins
+------------------
+
+ErrorClass plugins provide an easy way to add support for custom
+handling of particular classes of exceptions.
+
+An ErrorClass plugin defines one or more ErrorClasses and how each is
+handled and reported on. Each error class is stored in a different
+attribute on the result, and reported separately. Each error class must
+indicate the exceptions that fall under that class, the label to use
+for reporting, and whether exceptions of the class should be
+considered as failures for the whole test run.
+
+ErrorClasses use a declarative syntax. Assign an ErrorClass to the
+attribute you wish to add to the result object, defining the
+exceptions, label and isfailure attributes. For example, to declare an
+ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
+as an error class with the label 'TODO' that is considered a failure,
+do this:
+
+    >>> class Todo(Exception):
+    ...     pass
+    >>> class TodoError(ErrorClassPlugin):
+    ...     todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+The MetaErrorClass metaclass translates the ErrorClass declarations
+into the tuples used by the error handling and reporting functions in
+the result. This is an internal format and subject to change; you
+should always use the declarative syntax for attaching ErrorClasses to
+an ErrorClass plugin.
+
+    >>> TodoError.errorClasses # doctest: +ELLIPSIS
+    ((<class ...Todo...>, ('todo', 'TODO', True)),)
+
+Let's see the plugin in action. First some boilerplate.
+
+    >>> import sys
+    >>> import unittest
+    >>> try:
+    ...     # 2.7+
+    ...     from unittest.runner import _WritelnDecorator
+    ... except ImportError:
+    ...     from unittest import _WritelnDecorator
+    ...
+    >>> buf = _WritelnDecorator(sys.stdout)
+
+Now define a test case that raises a Todo.
+
+    >>> class TestTodo(unittest.TestCase):
+    ...     def runTest(self):
+    ...         raise Todo("I need to test something")
+    >>> case = TestTodo()
+
+Prepare the result using our plugin. Normally this happens during the
+course of test execution within nose -- you won't be doing this
+yourself. For the purposes of this testing document, I'm stepping
+through the internal process of nose so you can see what happens at
+each step.
+
+    >>> plugin = TodoError()
+    >>> from nose.result import _TextTestResult
+    >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
+    >>> plugin.prepareTestResult(result)
+
+Now run the test. TODO is printed.
+
+    >>> case(result) # doctest: +ELLIPSIS
+    runTest (....TestTodo) ... TODO: I need to test something
+
+Errors and failures are empty, but todo has our test:
+
+    >>> result.errors
+    []
+    >>> result.failures
+    []
+    >>> result.todo # doctest: +ELLIPSIS
+    [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
+    >>> result.printErrors() # doctest: +ELLIPSIS
+    <BLANKLINE>
+    ======================================================================
+    TODO: runTest (....TestTodo)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    ...Todo: I need to test something
+    <BLANKLINE>
+
+Since we defined a Todo as a failure, the run was not successful.
+
+    >>> result.wasSuccessful()
+    False
+"""
+
+from nose.pyversion import make_instancemethod
+from nose.plugins.base import Plugin
+from nose.result import TextTestResult
+from nose.util import isclass
+
class MetaErrorClass(type):
    """Metaclass for ErrorClassPlugins that allows error classes to be
    set up in a declarative manner.

    Collects ErrorClass declarations from the class body, removes them
    from the attribute dict, and stores them on the new class as the
    internal ``errorClasses`` tuple of
    (exception class, (attribute name, label, isfailure)) entries.
    """
    def __init__(self, name, bases, attr):
        errorClasses = []
        # Iterate over a snapshot: attr is mutated inside the loop (safe
        # on Python 3 too), and the loop variable must not shadow the
        # class-name parameter `name` used in the super().__init__ call.
        for attr_name, detail in list(attr.items()):
            if isinstance(detail, ErrorClass):
                attr.pop(attr_name)
                for cls in detail:
                    errorClasses.append(
                        (cls, (attr_name, detail.label, detail.isfailure)))
        super(MetaErrorClass, self).__init__(name, bases, attr)
        self.errorClasses = tuple(errorClasses)
+
+
class ErrorClass(object):
    """Declaration of one error class: the exception types it covers,
    the label used when reporting them, and whether they count as
    failures for the test run. `label` and `isfailure` are required
    keyword arguments.
    """
    def __init__(self, *errorClasses, **kw):
        self.errorClasses = errorClasses
        for key in ('label', 'isfailure'):
            if key not in kw:
                raise TypeError("%r is a required named argument for ErrorClass"
                                % key)
            setattr(self, key, kw.pop(key))

    def __iter__(self):
        # Iterating an ErrorClass yields its exception classes.
        return iter(self.errorClasses)
+
+
class ErrorClassPlugin(Plugin):
    """
    Base class for ErrorClass plugins. Subclass this class and declare the
    exceptions that you wish to handle as attributes of the subclass.
    """
    __metaclass__ = MetaErrorClass  # Python 2 metaclass declaration
    score = 1000
    # Filled in by MetaErrorClass from ErrorClass declarations.
    errorClasses = ()

    def addError(self, test, err):
        """Claim errors whose exception type belongs to one of this
        plugin's error classes; returning True tells nose the error has
        been handled.
        """
        err_cls, a, b = err
        if not isclass(err_cls):
            return
        classes = [e[0] for e in self.errorClasses]
        # any() instead of truth-testing filter(): identical on Python 2
        # and still correct on Python 3, where filter() returns a lazy
        # (always-truthy) iterator.
        if any(issubclass(err_cls, c) for c in classes):
            return True

    def prepareTestResult(self, result):
        """Patch the result object (once) and register this plugin's
        error classes with it.
        """
        if not hasattr(result, 'errorClasses'):
            self.patchResult(result)
        for cls, (storage_attr, label, isfail) in self.errorClasses:
            if cls not in result.errorClasses:
                storage = getattr(result, storage_attr, [])
                setattr(result, storage_attr, storage)
                result.errorClasses[cls] = (storage, label, isfail)

    def patchResult(self, result):
        """Replace result methods with errorclass-aware versions from
        nose's TextTestResult, keeping the originals on _orig_*
        attributes so they remain callable.
        """
        result.printLabel = print_label_patch(result)
        result._orig_addError, result.addError = \
            result.addError, add_error_patch(result)
        result._orig_wasSuccessful, result.wasSuccessful = \
            result.wasSuccessful, wassuccessful_patch(result)
        if hasattr(result, 'printErrors'):
            result._orig_printErrors, result.printErrors = \
                result.printErrors, print_errors_patch(result)
        if hasattr(result, 'addSkip'):
            result._orig_addSkip, result.addSkip = \
                result.addSkip, add_skip_patch(result)
        result.errorClasses = {}
+        result.errorClasses = {}
+
+
def add_error_patch(result):
    """Create a new addError method to patch into a result instance
    that recognizes the errorClasses attribute and deals with
    errorclasses correctly.
    """
    # Bind nose's TextTestResult implementation to this result instance.
    return make_instancemethod(TextTestResult.addError, result)
+
+
def print_errors_patch(result):
    """Create a new printErrors method that prints errorClasses items
    as well.
    """
    # Bind nose's TextTestResult implementation to this result instance.
    return make_instancemethod(TextTestResult.printErrors, result)
+
+
def print_label_patch(result):
    """Create a new printLabel method bound to this result instance
    (taken from nose's TextTestResult), used to report errorclass
    labels.
    """
    return make_instancemethod(TextTestResult.printLabel, result)
+
+
def wassuccessful_patch(result):
    """Create a new wasSuccessful method that checks errorClasses for
    exceptions that were put into other slots than error or failure
    but that still count as not success.
    """
    # Bind nose's TextTestResult implementation to this result instance.
    return make_instancemethod(TextTestResult.wasSuccessful, result)
+
+
def add_skip_patch(result):
    """Create a new addSkip method to patch into a result instance
    that delegates to addError.
    """
    # Bind nose's TextTestResult implementation to this result instance.
    return make_instancemethod(TextTestResult.addSkip, result)
+
+
if __name__ == '__main__':
    # Run this module's doctest examples when executed directly.
    import doctest
    doctest.testmod()
diff --git a/nose/plugins/failuredetail.py b/nose/plugins/failuredetail.py
new file mode 100644 (file)
index 0000000..c68b095
--- /dev/null
@@ -0,0 +1,43 @@
+"""
+This plugin provides assert introspection. When the plugin is enabled
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable 
+substitution is also performed in the context output to provide more
+debugging information.
+"""
+    
+from nose.plugins import Plugin
+from nose.inspector import inspect_traceback
+
class FailureDetail(Plugin):
    """
    Plugin that provides extra information in tracebacks of test failures.
    """
    # Run before the capture plugin so our detail is part of the output.
    score = 600

    def options(self, parser, env):
        """Register commandline options.
        """
        parser.add_option(
            "-d", "--detailed-errors", "--failure-detail",
            action="store_true",
            default=env.get('NOSE_DETAILED_ERRORS'),
            dest="detailedErrors", help="Add detail to error"
            " output by attempting to evaluate failed"
            " asserts [NOSE_DETAILED_ERRORS]")

    def configure(self, options, conf):
        """Configure plugin; enabled only when -d/--detailed-errors
        (or the NOSE_DETAILED_ERRORS env var) was given.
        """
        if not self.can_configure:
            return
        self.conf = conf
        self.enabled = options.detailedErrors

    def formatFailure(self, test, err):
        """Add detail from traceback inspection to error message of a failure.
        """
        ec, ev, tb = err
        detail = inspect_traceback(tb)
        # Stash the inspection result on the test for other consumers.
        test.tbinfo = detail
        return (ec, '\n'.join([str(ev), detail]), tb)
diff --git a/nose/plugins/isolate.py b/nose/plugins/isolate.py
new file mode 100644 (file)
index 0000000..13235df
--- /dev/null
@@ -0,0 +1,103 @@
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
+
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
+
+    def setup(module):
+        module._mods = sys.modules.copy()
+    
+    def teardown(module):
+        to_del = [ m for m in sys.modules.keys() if m not in
+                   module._mods ]
+        for mod in to_del:
+            del sys.modules[mod]
+        sys.modules.update(module._mods)
+
+Isolation works only during lazy loading. In normal use, this is only
+during discovery of modules within a directory, where the process of
+importing, loading tests and running tests from each module is
+encapsulated in a single loadTestsFromName call. This plugin
+implements loadTestsFromNames to force the same lazy-loading there,
+which allows isolation to work in directed mode as well as discovery,
+at the cost of some efficiency: lazy-loading names forces full context
+setup and teardown to run for each name, defeating the grouping that
+is normally used to ensure that context setup and teardown are run the
+fewest possible times for a given set of names.
+
+.. warning ::
+
+    This plugin should not be used in conjunction with other plugins
+    that assume that modules, once imported, will stay imported; for
+    instance, it may cause very odd results when used with the coverage
+    plugin.
+
+"""
+
+import logging
+import sys
+
+from nose.plugins import Plugin
+
+
+log = logging.getLogger('nose.plugins.isolation')
+
class IsolationPlugin(Plugin):
    """
    Activate the isolation plugin to isolate changes to external
    modules to a single test module or package. The isolation plugin
    resets the contents of sys.modules after each test module or
    package runs to its state before the test. PLEASE NOTE that this
    plugin should not be used with the coverage plugin, or in any other case
    where module reloading may produce undesirable side-effects.
    """
    score = 10  # run last
    name = 'isolation'

    def configure(self, options, conf):
        """Configure plugin and start with an empty snapshot stack."""
        Plugin.configure(self, options, conf)
        self._mod_stack = []

    def beforeContext(self):
        """Push a snapshot of sys.modules onto my stack."""
        self._mod_stack.append(sys.modules.copy())

    def afterContext(self):
        """Pop the most recent snapshot and restore sys.modules to it:
        drop modules imported since the snapshot, then re-install the
        originals.
        """
        snapshot = self._mod_stack.pop()
        added = [name for name in sys.modules.keys() if name not in snapshot]
        if added:
            log.debug('removing sys modules entries: %s', added)
            for name in added:
                del sys.modules[name]
        sys.modules.update(snapshot)

    def loadTestsFromNames(self, names, module=None):
        """Create a lazy suite that calls beforeContext and afterContext
        around each name. The side-effect of this is that full context
        fixtures will be set up and torn down around each test named.
        """
        if not names or len(names) == 1:
            # Nothing to isolate between; let the normal path handle it.
            return
        loader = self.loader
        plugins = self.conf.plugins
        def lazy():
            # Generator: context is snapshotted before each name is
            # loaded and restored after its tests have been consumed.
            for name in names:
                plugins.beforeContext()
                yield loader.loadTestsFromName(name, module=module)
                plugins.afterContext()
        return (loader.suiteClass(lazy), [])

    def prepareTestLoader(self, loader):
        """Get handle on test loader so we can use it in loadTestsFromNames.
        """
        self.loader = loader
+
diff --git a/nose/plugins/logcapture.py b/nose/plugins/logcapture.py
new file mode 100644 (file)
index 0000000..da3fe5d
--- /dev/null
@@ -0,0 +1,236 @@
+"""
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error or failure
+output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
+
+You can filter captured logging statements with the ``--logging-filter`` option. 
+If set, it specifies which logger(s) will be captured; loggers that do not match
+will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
+will ensure that only statements logged via sqlalchemy.engine, myapp
+or myapp.foo.bar logger will be logged.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
+"""
+
+import logging
+from logging.handlers import BufferingHandler
+import threading
+
+from nose.plugins.base import Plugin
+from nose.util import anyp, ln, safe_str
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
class FilterSet(object):
    """Set of logger-name filters.

    Components beginning with ``-`` are exclusions; all others are
    inclusions. A record name is allowed if it matches some inclusion
    (or there are none) and matches no exclusion.
    """
    def __init__(self, filter_components):
        self.inclusive, self.exclusive = self._partition(filter_components)

    # @staticmethod
    def _partition(components):
        """Split components into ([inclusions], [exclusions])."""
        inclusive, exclusive = [], []
        for component in components:
            if component.startswith('-'):
                exclusive.append(component[1:])
            else:
                inclusive.append(component)
        return inclusive, exclusive
    _partition = staticmethod(_partition)

    def allow(self, record):
        """returns whether this record should be printed"""
        # NOTE: the original guard was ``if not self:``, which never fired
        # because FilterSet defines no __len__/__nonzero__ and a plain
        # instance is always truthy. Test the filter lists explicitly so
        # the "nothing to filter" fast path actually works.
        if not self.inclusive and not self.exclusive:
            # nothing to filter
            return True
        return self._allow(record) and not self._deny(record)

    # @staticmethod
    def _any_match(matchers, record):
        """return the bool of whether `record` starts with
        any item in `matchers`"""
        def record_matches_key(key):
            # exact logger name, or a child of it (dotted prefix)
            return record == key or record.startswith(key + '.')
        return anyp(bool, map(record_matches_key, matchers))
    _any_match = staticmethod(_any_match)

    def _allow(self, record):
        if not self.inclusive:
            return True
        return self._any_match(self.inclusive, record)

    def _deny(self, record):
        if not self.exclusive:
            return False
        return self._any_match(self.exclusive, record)
+
+
class MyMemoryHandler(BufferingHandler):
    """Buffering log handler that never flushes on its own.

    Records accumulate in ``self.buffer`` until ``truncate`` discards
    them; ``filter`` delegates to a FilterSet built from the configured
    filter expressions. Pickling drops the (unpicklable) handler lock
    and recreates it on restore.
    """

    def __init__(self, capacity, logformat, logdatefmt, filters):
        BufferingHandler.__init__(self, capacity)
        self.setFormatter(logging.Formatter(logformat, logdatefmt))
        self.filterset = FilterSet(filters)

    def flush(self):
        # Intentionally a no-op: captured records are read back directly
        # from self.buffer rather than being emitted anywhere.
        pass

    def truncate(self):
        self.buffer = []

    def filter(self, record):
        return self.filterset.allow(record.name)

    def __getstate__(self):
        state = dict(self.__dict__)
        del state['lock']
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = threading.RLock()
+
+
class LogCapture(Plugin):
    """
    Log capture plugin. Enabled by default. Disable with --nologcapture.
    This plugin captures logging statements issued during test execution,
    appending any output captured to the error or failure output,
    should the test fail or raise an error.
    """
    enabled = True
    env_opt = 'NOSE_NOLOGCAPTURE'
    name = 'logcapture'
    score = 500
    # Default record format and date format for captured messages.
    logformat = '%(name)s: %(levelname)s: %(message)s'
    logdatefmt = None
    # When True, all other logging handlers are removed before capture.
    clear = False
    # Default filter: drop nose's own log messages.
    filters = ['-nose']

    def options(self, parser, env):
        """Register commandline options.
        """
        parser.add_option(
            "--nologcapture", action="store_false",
            default=not env.get(self.env_opt), dest="logcapture",
            # fixed typo: "configurtion" -> "configuration"
            help="Disable logging capture plugin. "
                 "Logging configuration will be left intact."
                 " [NOSE_NOLOGCAPTURE]")
        parser.add_option(
            "--logging-format", action="store", dest="logcapture_format",
            default=env.get('NOSE_LOGFORMAT') or self.logformat,
            metavar="FORMAT",
            help="Specify custom format to print statements. "
                 "Uses the same format as used by standard logging handlers."
                 " [NOSE_LOGFORMAT]")
        parser.add_option(
            "--logging-datefmt", action="store", dest="logcapture_datefmt",
            default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
            metavar="FORMAT",
            help="Specify custom date/time format to print statements. "
                 "Uses the same format as used by standard logging handlers."
                 " [NOSE_LOGDATEFMT]")
        parser.add_option(
            "--logging-filter", action="store", dest="logcapture_filters",
            default=env.get('NOSE_LOGFILTER'),
            metavar="FILTER",
            help="Specify which statements to filter in/out. "
                 "By default, everything is captured. If the output is too"
                 " verbose,\nuse this option to filter out needless output.\n"
                 "Example: filter=foo will capture statements issued ONLY to\n"
                 " foo or foo.what.ever.sub but not foobar or other logger.\n"
                 "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
                 "If any logger name is prefixed with a minus, eg filter=-foo,\n"
                 "it will be excluded rather than included. Default: "
                 "exclude logging messages from nose itself (-nose)."
                 " [NOSE_LOGFILTER]\n")
        parser.add_option(
            "--logging-clear-handlers", action="store_true",
            default=False, dest="logcapture_clear",
            help="Clear all other logging handlers")

    def configure(self, options, conf):
        """Configure plugin.
        """
        self.conf = conf
        # Disable if explicitly disabled, or if logging is
        # configured via logging config file
        if not options.logcapture or conf.loggingConfig:
            self.enabled = False
        self.logformat = options.logcapture_format
        self.logdatefmt = options.logcapture_datefmt
        self.clear = options.logcapture_clear
        if options.logcapture_filters:
            self.filters = options.logcapture_filters.split(',')

    def setupLoghandler(self):
        """Install self.handler on the root logger (optionally clearing
        all other handlers first).
        """
        # setup our handler with root logger
        root_logger = logging.getLogger()
        if self.clear:
            if hasattr(root_logger, "handlers"):
                for handler in root_logger.handlers:
                    root_logger.removeHandler(handler)
            for logger in logging.Logger.manager.loggerDict.values():
                if hasattr(logger, "handlers"):
                    for handler in logger.handlers:
                        logger.removeHandler(handler)
        # make sure there isn't one already
        # you can't simply use "if self.handler not in root_logger.handlers"
        # since at least in unit tests this doesn't work --
        # LogCapture() is instantiated for each test case while root_logger
        # is module global
        # so we always add new MyMemoryHandler instance
        for handler in root_logger.handlers[:]:
            if isinstance(handler, MyMemoryHandler):
                root_logger.handlers.remove(handler)
        root_logger.addHandler(self.handler)
        # to make sure everything gets captured
        root_logger.setLevel(logging.NOTSET)

    def begin(self):
        """Set up logging handler before test run begins.
        """
        self.start()

    def start(self):
        self.handler = MyMemoryHandler(1000, self.logformat, self.logdatefmt,
                                       self.filters)
        self.setupLoghandler()

    def end(self):
        pass

    def beforeTest(self, test):
        """Clear buffers and handlers before test.
        """
        self.setupLoghandler()

    def afterTest(self, test):
        """Clear buffers after test.
        """
        self.handler.truncate()

    def formatFailure(self, test, err):
        """Add captured log messages to failure output.
        """
        return self.formatError(test, err)

    def formatError(self, test, err):
        """Add captured log messages to error output.

        Attaches the formatted records to test.capturedLogging and
        returns a new (class, value, traceback) triple whose value
        includes the captured records.
        """
        # logic flow copied from Capture.formatError
        test.capturedLogging = records = self.formatLogRecords()
        if not records:
            return err
        ec, ev, tb = err
        return (ec, self.addCaptureToErr(ev, records), tb)

    def formatLogRecords(self):
        """Return the buffered records, each formatted to a safe string."""
        format = self.handler.format
        return [safe_str(format(r)) for r in self.handler.buffer]

    def addCaptureToErr(self, ev, records):
        """Append the captured records to the error value, delimited by
        begin/end banner lines.
        """
        return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
                          records + \
                          [ln('>> end captured logging <<')])
diff --git a/nose/plugins/manager.py b/nose/plugins/manager.py
new file mode 100644 (file)
index 0000000..ce3e48a
--- /dev/null
@@ -0,0 +1,446 @@
+"""
+Plugin Manager
+--------------
+
+A plugin manager class is used to load plugins, manage the list of
+loaded plugins, and proxy calls to those plugins.
+
+The plugin managers provided with nose are:
+
+:class:`PluginManager`
+    This manager doesn't implement loadPlugins, so it can only work
+    with a static list of plugins.
+
+:class:`BuiltinPluginManager`
+    This manager loads plugins referenced in ``nose.plugins.builtin``.
+
+:class:`EntryPointPluginManager`
+    This manager uses setuptools entrypoints to load plugins.
+
+:class:`DefaultPluginManager`
+    This is the manager class that will be used by default. If
+    setuptools is installed, it is a subclass of
+    :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
+    otherwise, an alias to :class:`BuiltinPluginManager`.
+
+:class:`RestrictedPluginManager`
+    This manager is for use in test runs where some plugin calls are
+    not available, such as runs started with ``python setup.py test``,
+    where the test runner is the default unittest :class:`TextTestRunner`. It
+    is a subclass of :class:`DefaultPluginManager`.
+
+Writing a plugin manager
+========================
+
+If you want to load plugins via some other means, you can write a
+plugin manager and pass an instance of your plugin manager class when
+instantiating the :class:`nose.config.Config` instance that you pass to
+:class:`TestProgram` (or :func:`main` or :func:`run`).
+
+To implement your plugin loading scheme, implement ``loadPlugins()``,
+and in that method, call ``addPlugin()`` with an instance of each plugin
+you wish to make available. Make sure to call
+``super(YourManager, self).loadPlugins()`` as well if you have subclassed a manager
+other than ``PluginManager``.
+
+"""
+import inspect
+import logging
+import os
+import sys
+from warnings import warn
+import nose.config
+from nose.failure import Failure
+from nose.plugins.base import IPluginInterface
+from nose.pyversion import sort_list
+
# Prefer the C implementations when available (Python 2); fall back to the
# pure-Python modules. Catch only ImportError so unrelated failures are not
# silently swallowed by a bare except.
try:
    import cPickle as pickle
except ImportError:
    import pickle
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
+
+
+__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
+           'BuiltinPluginManager', 'RestrictedPluginManager']
+
+log = logging.getLogger(__name__)
+
+
class PluginProxy(object):
    """Proxy for plugin calls. Essentially a closure bound to the
    given call and plugin list.

    The plugin proxy also must be bound to a particular plugin
    interface specification, so that it knows what calls are available
    and any special handling that is required for each call.
    """
    interface = IPluginInterface
    def __init__(self, call, plugins):
        # The interface method carries the metadata (generative/chainable/
        # static_args attributes) that drives makeCall's dispatch choice.
        try:
            self.method = getattr(self.interface, call)
        except AttributeError:
            raise AttributeError("%s is not a valid %s method"
                                 % (call, self.interface.__name__))
        self.call = self.makeCall(call)
        self.plugins = []
        for p in plugins:
            self.addPlugin(p, call)

    def __call__(self, *arg, **kw):
        return self.call(*arg, **kw)

    def addPlugin(self, plugin, call):
        """Add plugin to my list of plugins to call, if it has the attribute
        I'm bound to.
        """
        meth = getattr(plugin, call, None)
        if meth is not None:
            # Compatibility shim: old-style loadTestsFromModule takes only
            # (self, module); adapt it to the newer (module, path) signature
            # by dropping the extra arguments.
            # NOTE(review): inspect.getargspec is deprecated in Python 3 —
            # relevant only if this code is ever ported.
            if call == 'loadTestsFromModule' and \
                    len(inspect.getargspec(meth)[0]) == 2:
                orig_meth = meth
                meth = lambda module, path, **kwargs: orig_meth(module)
            self.plugins.append((plugin, meth))

    def makeCall(self, call):
        """Choose the dispatch strategy for this interface call."""
        if call == 'loadTestsFromNames':
            # special case -- load tests from names behaves somewhat differently
            # from other chainable calls, because plugins return a tuple, only
            # part of which can be chained to the next plugin.
            return self._loadTestsFromNames

        meth = self.method
        if getattr(meth, 'generative', False):
            # call all plugins and yield a flattened iterator of their results
            return lambda *arg, **kw: list(self.generate(*arg, **kw))
        elif getattr(meth, 'chainable', False):
            return self.chain
        else:
            # return a value from the first plugin that returns non-None
            return self.simple

    def chain(self, *arg, **kw):
        """Call plugins in a chain, where the result of each plugin call is
        sent to the next plugin as input. The final output result is returned.
        """
        result = None
        # extract the static arguments (if any) from arg so they can
        # be passed to each plugin call in the chain
        static = [a for (static, a)
                  in zip(getattr(self.method, 'static_args', []), arg)
                  if static]
        for p, meth in self.plugins:
            result = meth(*arg, **kw)
            # Rebuild the argument list: static args first, then the
            # previous plugin's result as the chained (last) argument.
            arg = static[:]
            arg.append(result)
        return result

    def generate(self, *arg, **kw):
        """Call all plugins, yielding each item in each non-None result.
        """
        for p, meth in self.plugins:
            result = None
            try:
                result = meth(*arg, **kw)
                if result is not None:
                    for r in result:
                        yield r
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # A failing plugin becomes a Failure test case rather than
                # aborting the whole run.
                exc = sys.exc_info()
                yield Failure(*exc)
                continue

    def simple(self, *arg, **kw):
        """Call all plugins, returning the first non-None result.
        """
        for p, meth in self.plugins:
            result = meth(*arg, **kw)
            if result is not None:
                return result

    def _loadTestsFromNames(self, names, module=None):
        """Chainable but not quite normal. Plugins return a tuple of
        (tests, names) after processing the names. The tests are added
        to a suite that is accumulated throughout the full call, while
        names are input for the next plugin in the chain.
        """
        suite = []
        for p, meth in self.plugins:
            result = meth(names, module=module)
            if result is not None:
                suite_part, names = result
                if suite_part:
                    suite.extend(suite_part)
        return suite, names
+
+
class NoPlugins(object):
    """Null Plugin manager that has no plugins.

    Stands in for a real plugin manager: any interface call is a no-op,
    and iterating the manager yields nothing.
    """
    interface = IPluginInterface
    def __init__(self):
        self._plugins = self.plugins = ()

    def __iter__(self):
        # Must return an *iterator*. The previous ``return ()`` returned a
        # bare tuple, so ``for p in manager`` raised
        # "TypeError: iter() returned non-iterator".
        return iter(())

    def _doNothing(self, *args, **kwds):
        pass

    def _emptyIterator(self, *args, **kwds):
        return ()

    def __getattr__(self, call):
        # Generative interface methods are expected to return an iterable;
        # anything else may simply return None.
        method = getattr(self.interface, call)
        if getattr(method, "generative", False):
            return self._emptyIterator
        else:
            return self._doNothing

    def addPlugin(self, plug):
        raise NotImplementedError()

    def addPlugins(self, plugins):
        raise NotImplementedError()

    def configure(self, options, config):
        pass

    def loadPlugins(self):
        pass

    def sort(self):
        pass
+
+
class PluginManager(object):
    """Base class for plugin managers. Does not implement loadPlugins, so it
    may only be used with a static list of plugins.

    The basic functionality of a plugin manager is to proxy all unknown
    attributes through a ``PluginProxy`` to a list of plugins.

    Note that the list of plugins *may not* be changed after the first plugin
    call.
    """
    proxyClass = PluginProxy

    def __init__(self, plugins=(), proxyClass=None):
        self._plugins = []
        # Cache of PluginProxy instances, keyed by interface call name.
        self._proxies = {}
        if plugins:
            self.addPlugins(plugins)
        if proxyClass is not None:
            self.proxyClass = proxyClass

    def __getattr__(self, call):
        # Lazily build and cache one proxy per interface call.
        if call not in self._proxies:
            self._proxies[call] = self.proxyClass(call, self._plugins)
        return self._proxies[call]

    def __iter__(self):
        return iter(self.plugins)

    def addPlugin(self, plug):
        # Allow, for instance, plugins loaded via entry points to supplant
        # builtin plugins: a plugin replaces any registered plugin that
        # shares its name.
        new_name = getattr(plug, 'name', object())
        survivors = [p for p in self._plugins
                     if getattr(p, 'name', None) != new_name]
        survivors.append(plug)
        self._plugins[:] = survivors

    def addPlugins(self, plugins):
        for plug in plugins:
            self.addPlugin(plug)

    def configure(self, options, config):
        """Configure the set of plugins with the given options
        and config instance. After configuration, disabled plugins
        are removed from the plugins list.
        """
        log.debug("Configuring plugins")
        self.config = config
        configure_all = PluginProxy('configure', self._plugins)
        configure_all(options, config)
        enabled = [plug for plug in self._plugins if plug.enabled]
        self.plugins = enabled
        self.sort()
        log.debug("Plugins enabled: %s", enabled)

    def loadPlugins(self):
        pass

    def sort(self):
        # Highest-scoring plugins run first.
        return sort_list(self._plugins, lambda x: getattr(x, 'score', 1),
                         reverse=True)

    def _get_plugins(self):
        return self._plugins

    def _set_plugins(self, plugins):
        self._plugins = []
        self.addPlugins(plugins)

    plugins = property(_get_plugins, _set_plugins, None,
                       """Access the list of plugins managed by
                       this plugin manager""")
+
+
class ZeroNinePlugin:
    """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.

    Wraps an old-style plugin and translates new-style calls (which
    receive test wrapper objects) into the old-style signatures, skipping
    any hook the wrapped plugin does not implement.
    """
    def __init__(self, plugin):
        self.plugin = plugin

    def options(self, parser, env=os.environ):
        self.plugin.add_options(parser, env)

    def addError(self, test, err):
        plugin = self.plugin
        if not hasattr(plugin, 'addError'):
            return
        # switch off to addSkip, addDeprecated if those types
        from nose.exc import SkipTest, DeprecatedTest
        ec, ev, tb = err
        if issubclass(ec, SkipTest):
            if hasattr(plugin, 'addSkip'):
                return plugin.addSkip(test.test)
            return
        if issubclass(ec, DeprecatedTest):
            if hasattr(plugin, 'addDeprecated'):
                return plugin.addDeprecated(test.test)
            return
        # 0.9 addError also received the captured output
        return plugin.addError(test.test, err, test.capturedOutput)

    def loadTestsFromFile(self, filename):
        # 0.9 name for this hook was loadTestsFromPath
        loadTests = getattr(self.plugin, 'loadTestsFromPath', None)
        if loadTests is not None:
            return loadTests(filename)

    def addFailure(self, test, err):
        if not hasattr(self.plugin, 'addFailure'):
            return
        # 0.9 addFailure also received capt and tbinfo
        return self.plugin.addFailure(
            test.test, err, test.capturedOutput, test.tbinfo)

    def addSuccess(self, test):
        if hasattr(self.plugin, 'addSuccess'):
            self.plugin.addSuccess(test.test, test.capturedOutput)

    def startTest(self, test):
        if hasattr(self.plugin, 'startTest'):
            return self.plugin.startTest(test.test)

    def stopTest(self, test):
        if hasattr(self.plugin, 'stopTest'):
            return self.plugin.stopTest(test.test)

    def __getattr__(self, val):
        return getattr(self.plugin, val)
+
+
class EntryPointPluginManager(PluginManager):
    """Plugin manager that loads plugins from the `nose.plugins` and
    `nose.plugins.0.10` entry points.
    """
    # 0.10-style plugins are used as-is; 0.9-style plugins (registered
    # under the plain `nose.plugins` entry point) are wrapped in a
    # ZeroNinePlugin adapter.
    entry_points = (('nose.plugins.0.10', None),
                    ('nose.plugins', ZeroNinePlugin))

    def loadPlugins(self):
        """Load plugins by iterating the `nose.plugins` entry point.
        """
        super(EntryPointPluginManager, self).loadPlugins()
        from pkg_resources import iter_entry_points

        # Track names already seen so a 0.10 plugin shadows a 0.9 plugin
        # registered under the same entry-point name (0.10 is listed first).
        loaded = {}
        for entry_point, adapt in self.entry_points:
            for ep in iter_entry_points(entry_point):
                if ep.name in loaded:
                    continue
                loaded[ep.name] = True
                log.debug('%s load plugin %s', self.__class__.__name__, ep)
                try:
                    plugcls = ep.load()
                except KeyboardInterrupt:
                    raise
                except Exception, e:
                    # never want a plugin load to kill the test run
                    # but we can't log here because the logger is not yet
                    # configured
                    warn("Unable to load plugin %s: %s" % (ep, e),
                         RuntimeWarning)
                    continue
                if adapt:
                    plug = adapt(plugcls())
                else:
                    plug = plugcls()
                self.addPlugin(plug)
+
+
class BuiltinPluginManager(PluginManager):
    """Plugin manager that loads plugins from the list in
    `nose.plugins.builtin`.
    """
    def loadPlugins(self):
        """Load plugins in nose.plugins.builtin
        """
        from nose.plugins import builtin
        for plugcls in builtin.plugins:
            self.addPlugin(plugcls())
        super(BuiltinPluginManager, self).loadPlugins()
+
try:
    import pkg_resources
    # setuptools is available: the default manager loads both builtin
    # plugins and setuptools entry-point plugins.
    class DefaultPluginManager(BuiltinPluginManager, EntryPointPluginManager):
        pass
except ImportError:
    # No setuptools: fall back to builtin plugins only.
    DefaultPluginManager = BuiltinPluginManager
+
+
class RestrictedPluginManager(DefaultPluginManager):
    """Plugin manager that restricts the plugin list to those not
    excluded by a list of exclude methods. Any plugin that implements
    an excluded method will be removed from the manager's plugin list
    after plugins are loaded.
    """
    def __init__(self, plugins=(), exclude=(), load=True):
        DefaultPluginManager.__init__(self, plugins)
        self.load = load
        self.exclude = exclude
        # Plugins dropped because they implement an excluded method.
        self.excluded = []
        self._excludedOpts = None

    def excludedOption(self, name):
        """Return the optparse option ``--<name>`` registered by an
        excluded plugin, building the throwaway parser on first use.
        """
        if self._excludedOpts is None:
            from optparse import OptionParser
            self._excludedOpts = OptionParser(add_help_option=False)
            for plugin in self.excluded:
                plugin.options(self._excludedOpts, env={})
        return self._excludedOpts.get_option('--' + name)

    def loadPlugins(self):
        if self.load:
            DefaultPluginManager.loadPlugins(self)
        allowed = []
        for plugin in self.plugins:
            implements_excluded = False
            for method in self.exclude:
                if hasattr(plugin, method):
                    implements_excluded = True
                    break
            if implements_excluded:
                self.excluded.append(plugin)
            else:
                allowed.append(plugin)
        self.plugins = allowed
diff --git a/nose/plugins/multiprocess.py b/nose/plugins/multiprocess.py
new file mode 100644 (file)
index 0000000..260cbf8
--- /dev/null
@@ -0,0 +1,798 @@
+"""
+Overview
+========
+
+The multiprocess plugin enables you to distribute your test run among a set of
+worker processes that run tests in parallel. This can speed up CPU-bound test
+runs (as long as the number of worker processes is around the number of
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else.
+
+.. note ::
+
+   See :doc:`../doc_tests/test_multiprocess/multiprocess` for
+   additional documentation and examples. Use of this plugin on python
+   2.5 or earlier requires the multiprocessing_ module, also available
+   from PyPI.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process
+separately. This ideal is not attainable in all cases, however, because many
+test suites depend on context (class, module or package) fixtures.
+
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute concurrently and multiple times, typically once per test.
+
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
+
+How results are collected and reported
+======================================
+
+As each test or suite executes in a worker process, results (failures, errors,
+and specially handled exceptions like SkipTest) are collected in that
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
+workers have died, the result summary is output as normal.
+
+Beware!
+=======
+
+Not all test suites will benefit from, or even operate correctly using, this
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
+interactions and behaviors due to the way in which tests are dispatched and
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
+
+But the biggest issue you will face is probably concurrency. Unless you
+have kept your tests as religiously pure unit tests, with no side-effects, no
+ordering issues, and no external dependencies, chances are you will experience
+odd, intermittent and unexplainable failures and errors when using this
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
+your test suite is not safe for concurrency.
+
+New Features in 1.1.0
+=====================
+
+* functions generated by test generators are now added to the worker queue
+  making them multi-threaded.
+* fixed timeout functionality, now functions will be terminated with a
+  TimedOutException exception when they exceed their execution time. The
+  worker processes are not terminated.
+* added ``--process-restartworker`` option to restart workers once they are
+  done, this helps control memory usage. Sometimes memory leaks can accumulate
+  making long runs very difficult.
+* added global _instantiate_plugins to configure which plugins are started
+  on the worker processes.
+
+"""
+
+import logging
+import os
+import sys
+import time
+import traceback
+import unittest
+import pickle
+import signal
+import nose.case
+from nose.core import TextTestRunner
+from nose import failure
+from nose import loader
+from nose.plugins.base import Plugin
+from nose.pyversion import bytes_
+from nose.result import TextTestResult
+from nose.suite import ContextSuite
+from nose.util import test_address
+try:
+    # 2.7+
+    from unittest.runner import _WritelnDecorator
+except ImportError:
+    from unittest import _WritelnDecorator
+from Queue import Empty
+from warnings import warn
try:
    from cStringIO import StringIO
except ImportError:
    # NOTE(review): this binds the StringIO *module*, while the cStringIO
    # branch above binds the StringIO *class* -- downstream code must cope
    # with both, or this was likely meant to be
    # ``from StringIO import StringIO``; confirm against usage below.
    import StringIO
+
+# this is a list of plugin classes that will be checked for and created inside 
+# each worker process
+_instantiate_plugins = None
+
+log = logging.getLogger(__name__)
+
+Process = Queue = Pool = Event = Value = Array = None
+
class TimedOutException(Exception):
    """Stand-in result recorded when a worker fails to return a test
    result within the configured timeout.
    """

    def __init__(self, value="Timed Out"):
        self.value = value

    def __str__(self):
        return repr(self.value)
+
def _import_mp():
    """Lazily import multiprocessing and publish the manager-backed
    primitives (Queue, Pool, Event, Value, Array) plus Process as module
    globals; warn instead of raising if multiprocessing is unavailable.

    NOTE(review): creating a Manager() spawns a manager server process as
    a side effect of this call -- confirm that is intended at the point
    where this runs (plugin configure time).
    """
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Manager-backed (proxied) objects are used so they can be shared
        # with the worker processes.
        m = Manager()
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning)
+
+
class TestLet:
    """Lightweight stand-in for a test case.

    Captures only the identifying strings of a test (id, short
    description, str()) so results can be reported across process
    boundaries without sending the test object itself.
    """
    def __init__(self, case):
        try:
            self._id = case.id()
        except AttributeError:
            # Not every case has id(); in that branch _id stays unset and
            # calling id() on this TestLet will raise AttributeError,
            # mirroring the wrapped case.
            pass
        self._short_description = case.shortDescription()
        self._str = str(case)

    def id(self):
        return self._id

    def shortDescription(self):
        return self._short_description

    def __str__(self):
        return self._str
+
class MultiProcess(Plugin):
    """
    Run tests in multiple processes. Requires processing module.
    """
    score = 1000
    # class-level (shared) status dict; 'active' is set once the plugin has
    # been fully configured and enabled in the parent process
    status = {}

    def options(self, parser, env):
        """
        Register command-line options.
        """
        parser.add_option("--processes", action="store",
                          default=env.get('NOSE_PROCESSES', 0),
                          dest="multiprocess_workers",
                          metavar="NUM",
                          help="Spread test run among this many processes. "
                          "Set a number equal to the number of processors "
                          "or cores in your machine for best results. "
                          "[NOSE_PROCESSES]")
        parser.add_option("--process-timeout", action="store",
                          default=env.get('NOSE_PROCESS_TIMEOUT', 10),
                          dest="multiprocess_timeout",
                          metavar="SECONDS",
                          help="Set timeout for return of results from each "
                          "test runner process. [NOSE_PROCESS_TIMEOUT]")
        parser.add_option("--process-restartworker", action="store_true",
                          default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
                          dest="multiprocess_restartworker",
                          help="If set, will restart each worker process once"
                          " their tests are done, this helps control memory "
                          "leaks from killing the system. "
                          "[NOSE_PROCESS_RESTARTWORKER]")

    def configure(self, options, config):
        """
        Configure plugin.

        Enables the plugin (and records the settings on ``config``) only
        when a nonzero worker count was requested and multiprocessing is
        importable.
        """
        # clear any 'active' flag left over from a previous configure run
        try:
            self.status.pop('active')
        except KeyError:
            pass
        # options may lack multiprocess_workers if options() never ran
        # (e.g. a different option parser was used); disable in that case
        if not hasattr(options, 'multiprocess_workers'):
            self.enabled = False
            return
        # don't start inside of a worker process
        if config.worker:
            return
        self.config = config
        try:
            workers = int(options.multiprocess_workers)
        except (TypeError, ValueError):
            workers = 0
        if workers:
            _import_mp()
            # Process stays None when multiprocessing is unavailable
            if Process is None:
                self.enabled = False
                return
            self.enabled = True
            # stash the effective settings on the config so they survive
            # pickling into the worker processes
            self.config.multiprocess_workers = workers
            t = float(options.multiprocess_timeout)
            self.config.multiprocess_timeout = t
            r = int(options.multiprocess_restartworker)
            self.config.multiprocess_restartworker = r
            self.status['active'] = True

    def prepareTestLoader(self, loader):
        """Remember loader class so MultiProcessTestRunner can instantiate
        the right loader.
        """
        self.loaderClass = loader.__class__

    def prepareTestRunner(self, runner):
        """Replace test runner with MultiProcessTestRunner.
        """
        # replace with our runner class
        return MultiProcessTestRunner(stream=runner.stream,
                                      verbosity=self.config.verbosity,
                                      config=self.config,
                                      loaderClass=self.loaderClass)
+
class MultiProcessTestRunner(TextTestRunner):
    """Test runner that dispatches tests to a pool of worker processes.

    The parent process queues test *addresses* (tests themselves are not
    picklable), workers load and execute them, shipping batched results
    back over a result queue, and the parent consolidates the batches
    into a single result while watching for hung or dead workers.
    """
    waitkilltime = 5.0 # max time to wait to terminate a process that does not
                       # respond to SIGINT
    def __init__(self, **kw):
        # loaderClass is used inside the workers to resolve test addresses
        # back into runnable tests
        self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
        super(MultiProcessTestRunner, self).__init__(**kw)

    def run(self, test):
        """
        Execute the test (which may be a test suite). If the test is a suite,
        distribute it out among as many processes as have been configured, at
        as fine a level as is possible given the context fixtures defined in
        the suite or any sub-suites.

        """
        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        testQueue = Queue()
        resultQueue = Queue()
        tasks = []
        completed = []
        workers = []
        to_teardown = []
        shouldStop = Event()

        result = self._makeResult()
        start = time.time()

        # dispatch and collect results
        # put indexes only on queue because tests aren't picklable
        for case in self.nextBatch(test):
            log.debug("Next batch %s (%s)", case, type(case))
            if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            # handle shared fixtures
            if isinstance(case, ContextSuite) and case.context is failure.Failure:
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
                log.debug("%s has shared fixtures", case)
                try:
                    # shared fixtures are set up once, here in the parent;
                    # the contained tests are then dispatched individually
                    case.setUp()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    log.debug("%s setup failed", sys.exc_info())
                    result.addError(case, sys.exc_info())
                else:
                    to_teardown.append(case)
                    for _t in case:
                        test_addr = self.addtask(testQueue,tasks,_t)
                        log.debug("Queued shared-fixture test %s (%s) to %s",
                                  len(tasks), test_addr, testQueue)

            else:
                test_addr = self.addtask(testQueue,tasks,case)
                log.debug("Queued test %s (%s) to %s",
                          len(tasks), test_addr, testQueue)

        log.debug("Starting %s workers", self.config.multiprocess_workers)
        for i in range(self.config.multiprocess_workers):
            # per-worker shared state used to monitor progress: the address
            # of the test currently running and the time it was started
            currentaddr = Value('c',bytes_(''))
            currentstart = Value('d',0.0)
            keyboardCaught = Event()
            p = Process(target=runner, args=(i, testQueue, resultQueue,
                                             currentaddr, currentstart,
                                             keyboardCaught, shouldStop,
                                             self.loaderClass,
                                             result.__class__,
                                             pickle.dumps(self.config)))
            # attach the shared objects to the Process for later inspection
            p.currentaddr = currentaddr
            p.currentstart = currentstart
            p.keyboardCaught = keyboardCaught
            # p.setDaemon(True)
            p.start()
            workers.append(p)
            log.debug("Started worker process %s", i+1)

        total_tasks = len(tasks)
        # need to keep track of the next time to check for timeouts in case
        # more than one process times out at the same time.
        nexttimeout=self.config.multiprocess_timeout
        while tasks:
            log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
                      len(completed), total_tasks,nexttimeout)
            try:
                iworker, addr, newtask_addrs, batch_result = resultQueue.get(
                                                        timeout=nexttimeout)
                log.debug('Results received for worker %d, %s, new tasks: %d',
                          iworker,addr,len(newtask_addrs))
                try:
                    try:
                        tasks.remove(addr)
                    except ValueError:
                        log.warn('worker %s failed to remove from tasks: %s',
                                 iworker,addr)
                    # workers may discover new tasks (e.g. from generators);
                    # account for them so the loop keeps waiting
                    total_tasks += len(newtask_addrs)
                    for newaddr in newtask_addrs:
                        tasks.append(newaddr)
                except KeyError:
                    log.debug("Got result for unknown task? %s", addr)
                    log.debug("current: %s",str(list(tasks)[0]))
                else:
                    completed.append([addr,batch_result])
                self.consolidate(result, batch_result)
                if (self.config.stopOnError
                    and not result.wasSuccessful()):
                    # set the stop condition
                    shouldStop.set()
                    break
                if self.config.multiprocess_restartworker:
                    log.debug('joining worker %s',iworker)
                    # wait for working, but not that important if worker
                    # cannot be joined in fact, for workers that add to
                    # testQueue, they will not terminate until all their
                    # items are read
                    workers[iworker].join(timeout=1)
                    if not shouldStop.is_set() and not testQueue.empty():
                        log.debug('starting new process on worker %s',iworker)
                        currentaddr = Value('c',bytes_(''))
                        currentstart = Value('d',time.time())
                        keyboardCaught = Event()
                        workers[iworker] = Process(target=runner,
                                                   args=(iworker, testQueue,
                                                         resultQueue,
                                                         currentaddr,
                                                         currentstart,
                                                         keyboardCaught,
                                                         shouldStop,
                                                         self.loaderClass,
                                                         result.__class__,
                                                         pickle.dumps(self.config)))
                        workers[iworker].currentaddr = currentaddr
                        workers[iworker].currentstart = currentstart
                        workers[iworker].keyboardCaught = keyboardCaught
                        workers[iworker].start()
            except Empty:
                # no result arrived within the timeout: inspect every worker
                # for hung tests and interrupt or replace it as needed
                log.debug("Timed out with %s tasks pending "
                          "(empty testQueue=%d): %s",
                          len(tasks),testQueue.empty(),str(tasks))
                any_alive = False
                for iworker, w in enumerate(workers):
                    if w.is_alive():
                        # empty currentaddr means the worker is idle between
                        # tests; non-empty means it is mid-test
                        worker_addr = bytes_(w.currentaddr.value,'ascii')
                        timeprocessing = time.time() - w.currentstart.value
                        if ( len(worker_addr) == 0
                                and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('worker %d has finished its work item, '
                                      'but is not exiting? do we wait for it?',
                                      iworker)
                        else:
                            any_alive = True
                        if (len(worker_addr) > 0
                            and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('timed out worker %s: %s',
                                      iworker,worker_addr)
                            # clearing currentaddr signals the worker that
                            # its current test has been timed out
                            w.currentaddr.value = bytes_('')
                            # If the process is in C++ code, sending a SIGINT
                            # might not send a python KeyboardInterrupt exception
                            # therefore, send multiple signals until an
                            # exception is caught. If this takes too long, then
                            # terminate the process
                            w.keyboardCaught.clear()
                            startkilltime = time.time()
                            while not w.keyboardCaught.is_set() and w.is_alive():
                                if time.time()-startkilltime > self.waitkilltime:
                                    # have to terminate...
                                    log.error("terminating worker %s",iworker)
                                    w.terminate()
                                    currentaddr = Value('c',bytes_(''))
                                    currentstart = Value('d',time.time())
                                    keyboardCaught = Event()
                                    workers[iworker] = Process(target=runner,
                                        args=(iworker, testQueue, resultQueue,
                                              currentaddr, currentstart,
                                              keyboardCaught, shouldStop,
                                              self.loaderClass,
                                              result.__class__,
                                              pickle.dumps(self.config)))
                                    workers[iworker].currentaddr = currentaddr
                                    workers[iworker].currentstart = currentstart
                                    workers[iworker].keyboardCaught = keyboardCaught
                                    workers[iworker].start()
                                    # there is a small probability that the
                                    # terminated process might send a result,
                                    # which has to be specially handled or
                                    # else processes might get orphaned.
                                    w = workers[iworker]
                                    break
                                os.kill(w.pid, signal.SIGINT)
                                time.sleep(0.1)
                if not any_alive and testQueue.empty():
                    log.debug("All workers dead")
                    break
            # shrink the next wait so a worker already partway through its
            # time allowance is re-checked as soon as it could time out
            nexttimeout=self.config.multiprocess_timeout
            for w in workers:
                if w.is_alive() and len(w.currentaddr.value) > 0:
                    timeprocessing = time.time()-w.currentstart.value
                    if timeprocessing <= self.config.multiprocess_timeout:
                        nexttimeout = min(nexttimeout,
                            self.config.multiprocess_timeout-timeprocessing)

        log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))

        # tear down, in the parent, the shared fixtures set up above
        for case in to_teardown:
            log.debug("Tearing down shared fixtures for %s", case)
            try:
                case.tearDown()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                result.addError(case, sys.exc_info())

        stop = time.time()

        # first write since can freeze on shutting down processes
        result.printErrors()
        result.printSummary(start, stop)
        self.config.plugins.finalize(result)

        log.debug("Tell all workers to stop")
        for w in workers:
            if w.is_alive():
                testQueue.put('STOP', block=False)

        # wait for the workers to end
        try:
            for iworker,worker in enumerate(workers):
                if worker.is_alive():
                    log.debug('joining worker %s',iworker)
                    worker.join()#10)
                    if worker.is_alive():
                        log.debug('failed to join worker %s',iworker)
        except KeyboardInterrupt:
            log.info('parent received ctrl-c')
            for worker in workers:
                worker.terminate()
                worker.join()

        return result

    def addtask(testQueue,tasks,case):
        """Queue a single test by address and return the address queued.

        Also records the address in ``tasks`` (when given) so the parent
        can match incoming results against outstanding work.
        """
        arg = None
        if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
            # this removes the top level descriptor and allows real function
            # name to be returned
            case.test.descriptor = None
            arg = case.test.arg
        test_addr = MultiProcessTestRunner.address(case)
        testQueue.put((test_addr,arg), block=False)
        if arg is not None:
            # include the argument in the tracked address so generated test
            # instances remain distinguishable from one another
            test_addr += str(arg)
        if tasks is not None:
            tasks.append(test_addr)
        return test_addr
    addtask = staticmethod(addtask)

    def address(case):
        """Convert a test case into a ``file:module:call`` address string
        that a loader in a worker process can resolve back into the test.
        """
        if hasattr(case, 'address'):
            file, mod, call = case.address()
        elif hasattr(case, 'context'):
            file, mod, call = test_address(case.context)
        else:
            raise Exception("Unable to convert %s to address" % case)
        parts = []
        if file is None:
            if mod is None:
                raise Exception("Unaddressable case %s" % case)
            else:
                parts.append(mod)
        else:
            # strip __init__.py(c) from end of file part
            # if present, having it there confuses loader
            dirname, basename = os.path.split(file)
            if basename.startswith('__init__'):
                file = dirname
            parts.append(file)
        if call is not None:
            parts.append(call)
        return ':'.join(map(str, parts))
    address = staticmethod(address)

    def nextBatch(self, test):
        """Yield the finest-grained dispatchable units within ``test``.

        Suites with context fixtures (unless marked splittable) are kept
        whole; fixture-free suites are recursed into down to case level.
        """
        # allows tests or suites to mark themselves as not safe
        # for multiprocess execution
        if hasattr(test, 'context'):
            if not getattr(test.context, '_multiprocess_', True):
                return

        if ((isinstance(test, ContextSuite)
             and test.hasFixtures(self.checkCanSplit))
            or not getattr(test, 'can_split', True)
            or not isinstance(test, unittest.TestSuite)):
            # regular test case, or a suite with context fixtures

            # special case: when run like nosetests path/to/module.py
            # the top-level suite has only one item, and it shares
            # the same context as that item. In that case, we want the
            # item, not the top-level suite
            if isinstance(test, ContextSuite):
                contained = list(test)
                if (len(contained) == 1
                    and getattr(contained[0],
                                'context', None) == test.context):
                    test = contained[0]
            yield test
        else:
            # Suite is without fixtures at this level; but it may have
            # fixtures at any deeper level, so we need to examine it all
            # the way down to the case level
            for case in test:
                for batch in self.nextBatch(case):
                    yield batch

    def checkCanSplit(self, context, fixt):
        """
        Callback that we use to check whether the fixtures found in a
        context or ancestor are ones we care about.

        Contexts can tell us that their fixtures are reentrant by setting
        _multiprocess_can_split_. So if we see that, we return False to
        disregard those fixtures.
        """
        if not fixt:
            return False
        if getattr(context, '_multiprocess_can_split_', False):
            return False
        return True

    def sharedFixtures(self, case):
        """Return True when the case's context asks for its fixtures to
        be run once, in the parent process (_multiprocess_shared_).
        """
        context = getattr(case, 'context', None)
        if not context:
            return False
        return getattr(context, '_multiprocess_shared_', False)

    def consolidate(self, result, batch_result):
        """Merge one worker's batched result tuple into ``result``.

        ``batch_result`` is the 5-tuple produced by the worker's batch()
        helper: (output, testsRun, failures, errors, errorClasses).
        """
        log.debug("batch result is %s" , batch_result)
        try:
            output, testsRun, failures, errors, errorClasses = batch_result
        except ValueError:
            log.debug("result in unexpected format %s", batch_result)
            failure.Failure(*sys.exc_info())(result)
            return
        self.stream.write(output)
        result.testsRun += testsRun
        result.failures.extend(failures)
        result.errors.extend(errors)
        for key, (storage, label, isfail) in errorClasses.items():
            if key not in result.errorClasses:
                # Ordinarily storage is result attribute
                # but it's only processed through the errorClasses
                # dict, so it's ok to fake it here
                result.errorClasses[key] = ([], label, isfail)
            mystorage, _junk, _junk = result.errorClasses[key]
            mystorage.extend(storage)
        log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
+
+
def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
           keyboardCaught, shouldStop, loaderClass, resultClass, config):
    """Entry point for a worker process.

    Thin wrapper around __runner that turns the two expected terminal
    conditions -- a KeyboardInterrupt and an Empty timeout on the test
    queue -- into debug-logged, clean exits.
    """
    try:
        return __runner(ix, testQueue, resultQueue, currentaddr,
                        currentstart, keyboardCaught, shouldStop,
                        loaderClass, resultClass, config)
    except KeyboardInterrupt:
        log.debug('Worker %s keyboard interrupt, stopping',ix)
    except Empty:
        log.debug("Worker %s timed out waiting for tasks", ix)
+
def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
           keyboardCaught, shouldStop, loaderClass, resultClass, config):
    """Worker main loop: pull test addresses off testQueue, run them, and
    push batched results onto resultQueue until 'STOP' is received, the
    queue times out, or shouldStop is set.
    """

    # config arrives pickled so it can cross the process boundary
    config = pickle.loads(config)
    dummy_parser = config.parserClass()
    if _instantiate_plugins is not None:
        for pluginclass in _instantiate_plugins:
            plugin = pluginclass()
            plugin.addOptions(dummy_parser,{})
            config.plugins.addPlugin(plugin)
    config.plugins.configure(config.options,config)
    config.plugins.begin()
    log.debug("Worker %s executing, pid=%d", ix,os.getpid())
    loader = loaderClass(config=config)
    # ensure suites loaded in this worker never run shared fixtures locally
    loader.suiteClass.suiteClass = NoSharedFixtureContextSuite

    def get():
        # raises Empty (handled in runner()) if no work arrives in time
        return testQueue.get(timeout=config.multiprocess_timeout)

    def makeResult():
        # each batch gets a fresh result that writes into an in-memory
        # stream; the captured output is shipped back with the batch
        stream = _WritelnDecorator(StringIO())
        result = resultClass(stream, descriptions=1,
                             verbosity=config.verbosity,
                             config=config)
        plug_result = config.plugins.prepareTestResult(result)
        if plug_result:
            return plug_result
        return result

    def batch(result):
        # reduce a result to a picklable tuple; cases are wrapped in
        # TestLet because real test cases cannot be pickled
        failures = [(TestLet(c), err) for c, err in result.failures]
        errors = [(TestLet(c), err) for c, err in result.errors]
        errorClasses = {}
        for key, (storage, label, isfail) in result.errorClasses.items():
            errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
                                 label, isfail)
        return (
            result.stream.getvalue(),
            result.testsRun,
            failures,
            errors,
            errorClasses)
    # 'STOP' is the sentinel the parent queues to shut workers down
    for test_addr, arg in iter(get, 'STOP'):
        if shouldStop.is_set():
            log.exception('Worker %d STOPPED',ix)
            break
        result = makeResult()
        test = loader.loadTestsFromNames([test_addr])
        test.testQueue = testQueue
        test.tasks = []
        test.arg = arg
        log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
        try:
            if arg is not None:
                test_addr = test_addr + str(arg)
            # publish what we are working on so the parent can detect hangs
            currentaddr.value = bytes_(test_addr)
            currentstart.value = time.time()
            test(result)
            currentaddr.value = bytes_('')
            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
        except KeyboardInterrupt:
            keyboardCaught.set()
            if len(currentaddr.value) > 0:
                # interrupted mid-test by the user: record as a failure
                log.exception('Worker %s keyboard interrupt, failing '
                                'current test %s',ix,test_addr)
                currentaddr.value = bytes_('')
                failure.Failure(*sys.exc_info())(result)
                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
            else:
                # parent cleared currentaddr: it timed this test out
                log.debug('Worker %s test %s timed out',ix,test_addr)
                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
        except SystemExit:
            currentaddr.value = bytes_('')
            log.exception('Worker %s system exit',ix)
            raise
        except:
            currentaddr.value = bytes_('')
            log.exception("Worker %s error running test or returning "
                            "results",ix)
            failure.Failure(*sys.exc_info())(result)
            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
        # under --process-restartworker each worker runs a single batch
        if config.multiprocess_restartworker:
            break
    log.debug("Worker %s ending", ix)
+
+
class NoSharedFixtureContextSuite(ContextSuite):
    """
    Context suite that never fires shared fixtures.

    When a context sets _multiprocess_shared_, fixtures in that context
    are executed by the main process. Using this suite class prevents them
    from executing in the runner process as well.

    """
    # plumbing assigned by the worker (__runner) / addtask machinery:
    # testQueue lets contained generator tests queue new work; tasks
    # collects the addresses queued; arg is the generator argument, if any
    testQueue = None
    tasks = None
    arg = None
    def setupContext(self, context):
        # skip setup for shared contexts; the parent process ran it already
        if getattr(context, '_multiprocess_shared_', False):
            return
        super(NoSharedFixtureContextSuite, self).setupContext(context)

    def teardownContext(self, context):
        # likewise, leave teardown of shared contexts to the parent
        if getattr(context, '_multiprocess_shared_', False):
            return
        super(NoSharedFixtureContextSuite, self).teardownContext(context)
    def run(self, result):
        """Run tests in suite inside of suite fixtures.

        If more than one test is contained and a testQueue is available,
        the tests are queued back to the parent for distribution instead
        of being run locally.
        """
        # proxy the result for myself
        log.debug("suite %s (%s) run called, tests: %s",
                  id(self), self, self._tests)
        if self.resultProxy:
            result, orig = self.resultProxy(result, self), result
        else:
            result, orig = result, result
        try:
            self.setUp()
        except KeyboardInterrupt:
            raise
        except:
            self.error_context = 'setup'
            result.addError(self, self._exc_info())
            return
        try:
            localtests = [test for test in self._tests]
            if len(localtests) > 1 and self.testQueue is not None:
                # more than one test: hand them back to the parent so they
                # can be distributed among all workers
                log.debug("queue %d tests"%len(localtests))
                for test in localtests:
                    if isinstance(test.test,nose.failure.Failure):
                        # probably failed in the generator, so execute directly
                        # to get the exception
                        test(orig)
                    else:
                        MultiProcessTestRunner.addtask(self.testQueue,
                                                       self.tasks, test)
            else:
                for test in localtests:
                    if (isinstance(test,nose.case.Test)
                        and self.arg is not None):
                        test.test.arg = self.arg
                    else:
                        test.arg = self.arg
                    test.testQueue = self.testQueue
                    test.tasks = self.tasks
                    if result.shouldStop:
                        log.debug("stopping")
                        break
                    # each nose.case.Test will create its own result proxy
                    # so the cases need the original result, to avoid proxy
                    # chains
                    try:
                        test(orig)
                    except KeyboardInterrupt,e:
                        # convert an interrupt during a test (typically a
                        # parent-initiated timeout) into a TimedOutException
                        err = (TimedOutException,TimedOutException(str(test)),
                               sys.exc_info()[2])
                        test.config.plugins.addError(test,err)
                        orig.addError(test,err)
        finally:
            self.has_run = True
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                self.error_context = 'teardown'
                result.addError(self, self._exc_info())
diff --git a/nose/plugins/plugintest.py b/nose/plugins/plugintest.py
new file mode 100644 (file)
index 0000000..68e8941
--- /dev/null
@@ -0,0 +1,416 @@
+"""
+Testing Plugins
+===============
+
+The plugin interface is well-tested enough to safely unit test your
+use of its hooks with some level of confidence. However, there is also 
+a mixin for unittest.TestCase called PluginTester that's designed to 
+test plugins in their native runtime environment.
+
+Here's a simple example with a do-nothing plugin and a composed suite.
+
+    >>> import unittest
+    >>> from nose.plugins import Plugin, PluginTester
+    >>> class FooPlugin(Plugin):
+    ...     pass
+    >>> class TestPluginFoo(PluginTester, unittest.TestCase):
+    ...     activate = '--with-foo'
+    ...     plugins = [FooPlugin()]
+    ...     def test_foo(self):
+    ...         for line in self.output:
+    ...             # i.e. check for patterns
+    ...             pass
+    ... 
+    ...         # or check for a line containing ...
+    ...         assert "ValueError" in self.output
+    ...     def makeSuite(self):
+    ...         class TC(unittest.TestCase):
+    ...             def runTest(self):
+    ...                 raise ValueError("I hate foo")
+    ...         return unittest.TestSuite([TC()])
+    ...
+    >>> res = unittest.TestResult()
+    >>> case = TestPluginFoo('test_foo')
+    >>> case(res)
+    >>> res.errors
+    []
+    >>> res.failures
+    []
+    >>> res.wasSuccessful()
+    True
+    >>> res.testsRun
+    1
+
+And here is a more complex example of testing a plugin that has extra
+arguments and reads environment variables.
+    
+    >>> import unittest, os
+    >>> from nose.plugins import Plugin, PluginTester
+    >>> class FancyOutputter(Plugin):
+    ...     name = "fancy"
+    ...     def configure(self, options, conf):
+    ...         Plugin.configure(self, options, conf)
+    ...         if not self.enabled:
+    ...             return
+    ...         self.fanciness = 1
+    ...         if options.more_fancy:
+    ...             self.fanciness = 2
+    ...         if 'EVEN_FANCIER' in self.env:
+    ...             self.fanciness = 3
+    ... 
+    ...     def options(self, parser, env=os.environ):
+    ...         self.env = env
+    ...         parser.add_option('--more-fancy', action='store_true')
+    ...         Plugin.options(self, parser, env=env)
+    ... 
+    ...     def report(self, stream):
+    ...         stream.write("FANCY " * self.fanciness)
+    ... 
+    >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
+    ...     activate = '--with-fancy' # enables the plugin
+    ...     plugins = [FancyOutputter()]
+    ...     args = ['--more-fancy']
+    ...     env = {'EVEN_FANCIER': '1'}
+    ... 
+    ...     def test_fancy_output(self):
+    ...         assert "FANCY FANCY FANCY" in self.output, (
+    ...                                         "got: %s" % self.output)
+    ...     def makeSuite(self):
+    ...         class TC(unittest.TestCase):
+    ...             def runTest(self):
+    ...                 raise ValueError("I hate fancy stuff")
+    ...         return unittest.TestSuite([TC()])
+    ... 
+    >>> res = unittest.TestResult()
+    >>> case = TestFancyOutputter('test_fancy_output')
+    >>> case(res)
+    >>> res.errors
+    []
+    >>> res.failures
+    []
+    >>> res.wasSuccessful()
+    True
+    >>> res.testsRun
+    1
+
+"""
+
+import re
+import sys
+from warnings import warn
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+__all__ = ['PluginTester', 'run']
+
+from os import getpid
+class MultiProcessFile(object):
+    """
+    helper for testing multiprocessing
+
+    multiprocessing poses a problem for doctests, since the strategy
+    of replacing sys.stdout/stderr with file-like objects then
+    inspecting the results won't work: the child processes will
+    write to the objects, but the data will not be reflected
+    in the parent doctest-ing process.
+
+    The solution is to create file-like objects which will interact with
+    multiprocessing in a more desirable way.
+
+    All processes can write to this object, but only the creator can read.
+    This allows the testing system to see a unified picture of I/O.
+    """
+    def __init__(self):
+        # per advice at:
+        #    http://docs.python.org/library/multiprocessing.html#all-platforms
+        # NOTE(review): Manager is bound by the module-level import further
+        # down this file; this class is only assigned to Buffer (and thus
+        # instantiated) when that import succeeded.
+        self.__master = getpid()
+        self.__queue = Manager().Queue()
+        self.__buffer = StringIO()
+        # file-protocol attribute consulted by the Python 2 print statement
+        self.softspace = 0
+
+    def buffer(self):
+        # Only the creating (master) process may drain the children's output.
+        if getpid() != self.__master:
+            return
+
+        from Queue import Empty
+        from collections import defaultdict
+        # accumulate queued writes per writer identity, then emit in
+        # sorted order so each writer's output stays contiguous
+        cache = defaultdict(str)
+        while True:
+            try:
+                pid, data = self.__queue.get_nowait()
+            except Empty:
+                break
+            if pid == ():
+                #show parent output after children
+                #this is what users see, usually
+                pid = ( 1e100, ) # googol!
+            cache[pid] += data
+        for pid in sorted(cache):
+            #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
+            self.__buffer.write( cache[pid] )
+    def write(self, data):
+        # note that these pids are in the form of current_process()._identity
+        # rather than OS pids
+        from multiprocessing import current_process
+        pid = current_process()._identity
+        self.__queue.put((pid, data))
+    def __iter__(self):
+        "getattr doesn't work for iter()"
+        self.buffer()
+        return self.__buffer
+    def seek(self, offset, whence=0):
+        # drain queued child output before repositioning
+        self.buffer()
+        return self.__buffer.seek(offset, whence)
+    def getvalue(self):
+        # drain queued child output before reading
+        self.buffer()
+        return self.__buffer.getvalue()
+    def __getattr__(self, attr):
+        # delegate any other file-protocol access to the underlying StringIO
+        return getattr(self.__buffer, attr)
+    
+# Prefer the multiprocessing-safe buffer when the multiprocessing module is
+# importable; otherwise fall back to a plain StringIO.
+try:
+    from multiprocessing import Manager
+    Buffer = MultiProcessFile
+except ImportError:
+    Buffer = StringIO
+
+class PluginTester(object):
+    """A mixin for testing nose plugins in their runtime environment.
+    
+    Subclass this and mix in unittest.TestCase to run integration/functional 
+    tests on your plugin.  When setUp() is called, the stub test suite is 
+    executed with your plugin so that during an actual test you can inspect the 
+    artifacts of how your plugin interacted with the stub test suite.
+    
+    - activate
+    
+      - the argument to send nosetests to activate the plugin
+     
+    - suitepath
+    
+      - if set, this is the path of the suite to test. Otherwise, you
+        will need to use the hook, makeSuite()
+      
+    - plugins
+
+      - the list of plugins to make available during the run. Note
+        that this does not mean these plugins will be *enabled* during
+        the run -- only the plugins enabled by the activate argument
+        or other settings in argv or env will be enabled.
+    
+    - args
+  
+      - a list of arguments to add to the nosetests command, in addition to
+        the activate argument
+    
+    - env
+    
+      - optional dict of environment variables to send nosetests
+
+    """
+    # defaults; see the class docstring above for the meaning of each
+    activate = None
+    suitepath = None
+    args = None
+    env = {}
+    argv = None
+    plugins = []
+    ignoreFiles = None
+    
+    def makeSuite(self):
+        """returns a suite object of tests to run (unittest.TestSuite())
+        
+        If self.suitepath is None, this must be implemented. The returned suite 
+        object will be executed with all plugins activated.  It may return 
+        None.
+        
+        Here is an example of a basic suite object you can return ::
+        
+            >>> import unittest
+            >>> class SomeTest(unittest.TestCase):
+            ...     def runTest(self):
+            ...         raise ValueError("Now do something, plugin!")
+            ... 
+            >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
+            <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>
+        
+        """
+        raise NotImplementedError
+    
+    def _execPlugin(self):
+        """execute the plugin on the internal test suite.
+        """
+        from nose.config import Config
+        from nose.core import TestProgram
+        from nose.plugins.manager import PluginManager
+        
+        suite = None
+        # capture all test-run output in an in-memory buffer
+        stream = Buffer()
+        conf = Config(env=self.env,
+                      stream=stream,
+                      plugins=PluginManager(plugins=self.plugins))
+        if self.ignoreFiles is not None:
+            conf.ignoreFiles = self.ignoreFiles
+        # a suitepath on the command line takes the place of a built suite
+        if not self.suitepath:
+            suite = self.makeSuite()
+            
+        self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
+                                exit=False)
+        # expose the captured output with string-like access for assertions
+        self.output = AccessDecorator(stream)
+                                
+    def setUp(self):
+        """runs nosetests with the specified test suite, all plugins 
+        activated.
+        """
+        # build the nosetests command line: activation flag, extra args,
+        # then the optional suite path
+        self.argv = ['nosetests', self.activate]
+        if self.args:
+            self.argv.extend(self.args)
+        if self.suitepath:
+            self.argv.append(self.suitepath)            
+
+        self._execPlugin()
+
+
+class AccessDecorator(object):
+    """Wrap a captured output stream, snapshotting its full contents so
+    tests can use ``in``, iteration and str() on the test-run output.
+    """
+    stream = None
+    _buf = None
+    def __init__(self, stream):
+        self.stream = stream
+        # snapshot the whole stream, then rewind so it can still be iterated
+        stream.seek(0)
+        self._buf = stream.read()
+        stream.seek(0)
+    def __contains__(self, val):
+        return val in self._buf
+    def __iter__(self):
+        # line-by-line access delegates to the underlying stream
+        return iter(self.stream)
+    def __str__(self):
+        return self._buf
+
+
+def blankline_separated_blocks(text):
+    "a bunch of === characters is also considered a blank line"
+    # Yield successive runs of lines, each run terminated by a blank line
+    # (or an ===-only separator line); the terminator stays with its block.
+    block = []
+    for line in text.splitlines(True):
+        block.append(line)
+        line = line.strip()
+        if not line or line.startswith('===') and not line.strip('='):
+            yield "".join(block)
+            block = []
+    if block:
+        yield "".join(block)
+
+
+def remove_stack_traces(out):
+    """Collapse each traceback in *out* to its header line, an ellipsis,
+    and the final exception line, so doctests need not match stack frames.
+    """
+    # this regexp taken from Python 2.5's doctest
+    traceback_re = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                   # toss trailing whitespace on the header.
+        (?P<stack> .*?)         # don't blink: absorb stuff until...
+        ^(?=\w)                 #     a line *starts* with alphanum.
+        .*?(?P<exception> \w+ ) # exception name
+        (?P<msg> [:\n] .*)      # the rest
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+    # process one blankline-separated block at a time so a traceback
+    # cannot swallow unrelated following output
+    blocks = []
+    for block in blankline_separated_blocks(out):
+        blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block))
+    return "".join(blocks)
+
+
+def simplify_warnings(out):
+    """Reduce warning output to '<Category>: <message>', dropping the
+    file/line location and the echoed source line.
+    """
+    warn_re = re.compile(r"""
+        # Cut the file and line no, up to the warning name
+        ^.*:\d+:\s
+        (?P<category>\w+): \s+        # warning category
+        (?P<detail>.+) $ \n?          # warning message
+        ^ .* $                        # stack frame
+        """, re.VERBOSE | re.MULTILINE)
+    return warn_re.sub(r"\g<category>: \g<detail>", out)
+
+
+def remove_timings(out):
+    """Replace the elapsed time in 'Ran N tests in X.XXXs' with '...s'."""
+    return re.sub(
+        r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out)
+
+
+def munge_nose_output_for_doctest(out):
+    """Modify nose output to make it easy to use in doctests."""
+    # strip the three run-to-run variable parts: stack frames, warning
+    # locations, and timings
+    out = remove_stack_traces(out)
+    out = simplify_warnings(out)
+    out = remove_timings(out)
+    return out.strip()
+
+
+def run(*arg, **kw):
+    """
+    Specialized version of nose.run for use inside of doctests that
+    test test runs.
+
+    This version of run() prints the result output to stdout.  Before
+    printing, the output is processed by replacing the timing
+    information with an ellipsis (...), removing traceback stacks, and
+    removing trailing whitespace.
+
+    Use this version of run wherever you are writing a doctest that
+    tests nose (or unittest) test result output.
+
+    Note: do not use doctest: +ELLIPSIS when testing nose output,
+    since ellipses ("test_foo ... ok") in your expected test runner
+    output may match multiple lines of output, causing spurious test
+    passes!
+    """
+    from nose import run
+    from nose.config import Config
+    from nose.plugins.manager import PluginManager
+
+    buffer = Buffer()
+    # supply default config and argv when the caller did not pass them
+    if 'config' not in kw:
+        plugins = kw.pop('plugins', [])
+        if isinstance(plugins, list):
+            plugins = PluginManager(plugins=plugins)
+        env = kw.pop('env', {})
+        kw['config'] = Config(env=env, plugins=plugins)
+    if 'argv' not in kw:
+        kw['argv'] = ['nosetests', '-v']
+    # all result output goes through our capture buffer
+    kw['config'].stream = buffer
+    
+    # Set up buffering so that all output goes to our buffer,
+    # or warn user if deprecated behavior is active. If this is not
+    # done, prints and warnings will either be out of place or
+    # disappear.
+    stderr = sys.stderr
+    stdout = sys.stdout
+    if kw.pop('buffer_all', False):
+        sys.stdout = sys.stderr = buffer
+        restore = True
+    else:
+        restore = False
+        warn("The behavior of nose.plugins.plugintest.run() will change in "
+             "the next release of nose. The current behavior does not "
+             "correctly account for output to stdout and stderr. To enable "
+             "correct behavior, use run_buffered() instead, or pass "
+             "the keyword argument buffer_all=True to run().",
+             DeprecationWarning, stacklevel=2)
+    try:
+        run(*arg, **kw)
+    finally:
+        # always restore the real streams, even if the run raises
+        if restore:
+            sys.stderr = stderr
+            sys.stdout = stdout
+    out = buffer.getvalue()
+    print munge_nose_output_for_doctest(out)
+
+    
+def run_buffered(*arg, **kw):
+    """Like :func:`run`, but with stdout/stderr redirected into the
+    capture buffer (the non-deprecated behavior)."""
+    kw['buffer_all'] = True
+    run(*arg, **kw)
+
+if __name__ == '__main__':
+    # run this module's own doctests (the examples in the module docstring)
+    import doctest
+    doctest.testmod()
diff --git a/nose/plugins/prof.py b/nose/plugins/prof.py
new file mode 100644 (file)
index 0000000..4d304a9
--- /dev/null
@@ -0,0 +1,154 @@
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
+
+See the `hotshot documentation`_ in the standard library documentation for
+more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
+"""
+
+# hotshot is optional (not present in all Python builds); when it cannot be
+# imported, available() below returns False and the plugin disables itself.
+try:
+    import hotshot
+    from hotshot import stats
+except ImportError:
+    hotshot, stats = None, None
+import logging
+import os
+import sys
+import tempfile
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins')
+
+class Profile(Plugin):
+    """
+    Use this plugin to run tests using the hotshot profiler. 
+    """
+    # path of the stats file; None until configured or lazily created
+    pfile = None
+    # whether we created the stats file ourselves and should delete it
+    clean_stats_file = False
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        # no options if hotshot is not importable on this interpreter
+        if not self.available():
+            return
+        Plugin.options(self, parser, env)
+        parser.add_option('--profile-sort', action='store', dest='profile_sort',
+                          default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
+                          metavar="SORT",
+                          help="Set sort order for profiler output")
+        parser.add_option('--profile-stats-file', action='store',
+                          dest='profile_stats_file',
+                          metavar="FILE",
+                          default=env.get('NOSE_PROFILE_STATS_FILE'),
+                          help='Profiler stats file; default is a new '
+                          'temp file on each run')
+        parser.add_option('--profile-restrict', action='append',
+                          dest='profile_restrict',
+                          metavar="RESTRICT",
+                          default=env.get('NOSE_PROFILE_RESTRICT'),
+                          help="Restrict profiler output. See help for "
+                          "pstats.Stats for details")
+
+    # pre-decorator classmethod declaration style: defined, then wrapped
+    def available(cls):
+        return hotshot is not None
+    available = classmethod(available)
+
+    def begin(self):
+        """Create profile stats file and load profiler.
+        """
+        if not self.available():
+            return
+        self._create_pfile()
+        self.prof = hotshot.Profile(self.pfile)
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        if not self.available():
+            self.enabled = False
+            return
+        Plugin.configure(self, options, conf)
+        self.conf = conf
+        if options.profile_stats_file:
+            # user-supplied file: keep it around after the run
+            self.pfile = options.profile_stats_file
+            self.clean_stats_file = False
+        else:
+            # no file given: a temp file will be created lazily and removed
+            self.pfile = None
+            self.clean_stats_file = True
+        self.fileno = None
+        self.sort = options.profile_sort
+        self.restrict = tolist(options.profile_restrict)
+
+    def prepareTest(self, test):
+        """Wrap entire test run in :func:`prof.runcall`.
+        """
+        if not self.available():
+            return
+        log.debug('preparing test %s' % test)
+        # returning a callable here replaces the normal test run
+        def run_and_profile(result, prof=self.prof, test=test):
+            self._create_pfile()
+            prof.runcall(test, result)
+        return run_and_profile
+
+    def report(self, stream):
+        """Output profiler report.
+        """
+        log.debug('printing profiler report')
+        self.prof.close()
+        prof_stats = stats.load(self.pfile)
+        prof_stats.sort_stats(self.sort)
+
+        # 2.5 has completely different stream handling from 2.4 and earlier.
+        # Before 2.5, stats objects have no stream attribute; in 2.5 and later
+        # a reference sys.stdout is stored before we can tweak it.
+        compat_25 = hasattr(prof_stats, 'stream')
+        if compat_25:
+            tmp = prof_stats.stream
+            prof_stats.stream = stream
+        else:
+            tmp = sys.stdout
+            sys.stdout = stream
+        try:
+            if self.restrict:
+                log.debug('setting profiler restriction to %s', self.restrict)
+                prof_stats.print_stats(*self.restrict)
+            else:
+                prof_stats.print_stats()
+        finally:
+            # restore whichever stream we redirected above
+            if compat_25:
+                prof_stats.stream = tmp
+            else:
+                sys.stdout = tmp
+
+    def finalize(self, result):
+        """Clean up stats file, if configured to do so.
+        """
+        if not self.available():
+            return
+        try:
+            self.prof.close()
+        except AttributeError:
+            # TODO: is this trying to catch just the case where not
+            # hasattr(self.prof, "close")?  If so, the function call should be
+            # moved out of the try: suite.
+            pass
+        if self.clean_stats_file:
+            # close the temp-file descriptor (if we made one), then unlink;
+            # both steps are best-effort
+            if self.fileno:
+                try:
+                    os.close(self.fileno)
+                except OSError:
+                    pass
+            try:
+                os.unlink(self.pfile)
+            except OSError:
+                pass
+        return None
+
+    def _create_pfile(self):
+        # lazily create a temp stats file when none was configured
+        if not self.pfile:
+            self.fileno, self.pfile = tempfile.mkstemp()
+            self.clean_stats_file = True
diff --git a/nose/plugins/skip.py b/nose/plugins/skip.py
new file mode 100644 (file)
index 0000000..27e5162
--- /dev/null
@@ -0,0 +1,56 @@
+"""
+This plugin installs a SKIP error class for the SkipTest exception.
+When SkipTest is raised, the exception will be logged in the skipped
+attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
+the exception will not be counted as an error or failure. This plugin
+is enabled by default but may be disabled with the ``--no-skip`` option.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+try:
+    # 2.7
+    from unittest.case import SkipTest
+except ImportError:
+    # 2.6 and below
+    class SkipTest(Exception):
+        """Raise this exception to mark a test as skipped.
+        """
+    # NOTE(review): this trailing 'pass' is a stray no-op after the class
+    # definition; it has no effect.
+    pass
+
+
+class Skip(ErrorClassPlugin):
+    """
+    Plugin that installs a SKIP error class for the SkipTest
+    exception.  When SkipTest is raised, the exception will be logged
+    in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
+    will be output, and the exception will not be counted as an error
+    or failure.
+    """
+    # on by default; disabled only via --no-skip (see configure)
+    enabled = True
+    skipped = ErrorClass(SkipTest,
+                         label='SKIP',
+                         isfailure=False)
+
+    def options(self, parser, env):
+        """
+        Add my options to command line.
+        """
+        # Plugin.options is deliberately not called: the plugin is enabled
+        # by default, so only the disabling switch is registered.
+        env_opt = 'NOSE_WITHOUT_SKIP'
+        parser.add_option('--no-skip', action='store_true',
+                          dest='noSkip', default=env.get(env_opt, False),
+                          help="Disable special handling of SkipTest "
+                          "exceptions.")
+
+    def configure(self, options, conf):
+        """
+        Configure plugin. Skip plugin is enabled by default.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        disable = getattr(options, 'noSkip', False)
+        if disable:
+            self.enabled = False
+
diff --git a/nose/plugins/testid.py b/nose/plugins/testid.py
new file mode 100644 (file)
index 0000000..80f282d
--- /dev/null
@@ -0,0 +1,306 @@
+"""
+This plugin adds a test id (like #1) to each test name output. After
+you've run once to generate test ids, you can re-run individual
+tests by activating the plugin and passing the ids (with or
+without the # prefix) instead of test names.
+
+For example, if your normal test run looks like::
+
+  % nosetests -v
+  tests.test_a ... ok
+  tests.test_b ... ok
+  tests.test_c ... ok
+
+When adding ``--with-id`` you'll see::
+
+  % nosetests -v --with-id
+  #1 tests.test_a ... ok
+  #2 tests.test_b ... ok
+  #3 tests.test_c ... ok
+
+Then you can re-run individual tests by supplying just an id number::
+
+  % nosetests -v --with-id 2
+  #2 tests.test_b ... ok
+
+You can also pass multiple id numbers::
+
+  % nosetests -v --with-id 2 3
+  #2 tests.test_b ... ok
+  #3 tests.test_c ... ok
+  
+Since most shells consider '#' a special character, you can leave it out when
+specifying a test id.
+
+Note that when run without the -v switch, no special output is displayed, but
+the ids file is still written.
+
+Looping over failed tests
+-------------------------
+
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+ #4 test.test_d ... ok
+
+On the second run, only tests #2 and #3 will run::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+
+As you correct errors and tests pass, they'll drop out of subsequent runs.
+
+First::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ok
+ #3 test.test_c ... FAILED
+
+Second::
+
+ % nosetests -v --failed
+ #3 test.test_c ... FAILED
+
+When all tests pass, the full set will run on the next invocation.
+
+First::
+
+ % nosetests -v --failed
+ #3 test.test_c ... ok
+
+Second::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ok
+ #3 test.test_c ... ok
+ #4 test.test_d ... ok
+
+.. note ::
+
+  If you expect to use ``--failed`` regularly, it's a good idea to always run
+  using the ``--with-id`` option. This will ensure that an id file is
+  always created, allowing you to add ``--failed`` to the command line as soon
+  as you have failing tests. Otherwise, your first run using ``--failed`` will
+  (perhaps surprisingly) run *all* tests, because there won't be an id file
+  containing the record of failed tests from your previous run.
+  
+"""
+__test__ = False
+
+import logging
+import os
+from nose.plugins import Plugin
+from nose.util import src, set
+
+try:
+    # NOTE(review): both branches import from the same module; the first
+    # import was presumably meant to be cPickle (the C-accelerated
+    # implementation on Python 2) — TODO confirm against upstream.
+    from pickle import dump, load
+except ImportError:
+    from pickle import dump, load
+
+log = logging.getLogger(__name__)
+
+
+class TestId(Plugin):
+    """
+    Activate to add a test id (like #1) to each test name output. Activate
+    with --failed to rerun failing tests only.
+    """
+    name = 'id'
+    # path of the ids file (.noseids by default); set in configure()
+    idfile = None
+    # True while new ids are being assigned; False when running from ids
+    collecting = True
+    # True when --failed was given: rerun only previously failed tests
+    loopOnFailed = False
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        Plugin.options(self, parser, env)
+        parser.add_option('--id-file', action='store', dest='testIdFile',
+                          default='.noseids', metavar="FILE",
+                          help="Store test ids found in test runs in this "
+                          "file. Default is the file .noseids in the "
+                          "working directory.")
+        parser.add_option('--failed', action='store_true',
+                          dest='failed', default=False,
+                          help="Run the tests that failed in the last "
+                          "test run.")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        Plugin.configure(self, options, conf)
+        # --failed implies the plugin is active even without --with-id
+        if options.failed:
+            self.enabled = True
+            self.loopOnFailed = True
+            log.debug("Looping on failed tests")
+        self.idfile = os.path.expanduser(options.testIdFile)
+        if not os.path.isabs(self.idfile):
+            self.idfile = os.path.join(conf.workingDir, self.idfile)
+        self.id = 1
+        # Ids and tests are mirror images: ids are {id: test address} and
+        # tests are {test address: id}
+        self.ids = {}
+        self.tests = {}
+        self.failed = []
+        self.source_names = []
+        # used to track ids seen when tests is filled from
+        # loaded ids file
+        self._seen = {}
+        # only print the '#N ' prefixes when running verbosely
+        self._write_hashes = conf.verbosity >= 2
+
+    def finalize(self, result):
+        """Save new ids file, if needed.
+        """
+        # a fully green run clears the failed list so the next --failed
+        # run executes the full set again
+        if result.wasSuccessful():
+            self.failed = []
+        if self.collecting:
+            # invert the tests mapping back into {id: address}
+            ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
+        else:
+            ids = self.ids
+        fh = open(self.idfile, 'wb')
+        dump({'ids': ids,
+              'failed': self.failed,
+              'source_names': self.source_names}, fh)
+        fh.close()
+        log.debug('Saved test ids: %s, failed %s to %s',
+                  ids, self.failed, self.idfile)
+
+    def loadTestsFromNames(self, names, module=None):
+        """Translate ids in the list of requested names into their
+        test addresses, if they are found in my dict of tests.
+        """
+        log.debug('ltfn %s %s', names, module)
+        try:
+            fh = open(self.idfile, 'rb')
+            data = load(fh)
+            if 'ids' in data:
+                self.ids = data['ids']
+                self.failed = data['failed']
+                self.source_names = data['source_names']
+            else:
+                # old ids field
+                self.ids = data
+                self.failed = []
+                self.source_names = names
+            if self.ids:
+                # resume numbering after the highest saved id
+                self.id = max(self.ids) + 1
+                self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
+            else:
+                self.id = 1
+            log.debug(
+                'Loaded test ids %s tests %s failed %s sources %s from %s',
+                self.ids, self.tests, self.failed, self.source_names,
+                self.idfile)
+            fh.close()
+        except IOError:
+            # no ids file yet: first run, nothing to translate
+            log.debug('IO error reading %s', self.idfile)
+
+        if self.loopOnFailed and self.failed:
+            self.collecting = False
+            names = self.failed
+            self.failed = []
+        # I don't load any tests myself, only translate names like '#2'
+        # into the associated test addresses
+        translated = []
+        new_source = []
+        really_new = []
+        for name in names:
+            trans = self.tr(name)
+            if trans != name:
+                translated.append(trans)
+            else:
+                new_source.append(name)
+        # names that are not ids and that are not in the current
+        # list of source names go into the list for next time
+        if new_source:
+            new_set = set(new_source)
+            old_set = set(self.source_names)
+            log.debug("old: %s new: %s", old_set, new_set)
+            really_new = [s for s in new_source
+                          if not s in old_set]
+            if really_new:
+                # remember new sources
+                self.source_names.extend(really_new)
+            if not translated:
+                # new set of source names, no translations
+                # means "run the requested tests"
+                names = new_source
+        else:
+            # no new names to translate and add to id set
+            self.collecting = False
+        log.debug("translated: %s new sources %s names %s",
+                  translated, really_new, names)
+        return (None, translated + really_new or names)
+
+    def makeName(self, addr):
+        # turn a (filename, module, call) address into a nose test name
+        log.debug("Make name %s", addr)
+        filename, module, call = addr
+        if filename is not None:
+            head = src(filename)
+        else:
+            head = module
+        if call is not None:
+            return "%s:%s" % (head, call)
+        return head
+
+    def setOutputStream(self, stream):
+        """Get handle on output stream so the plugin can print id #s
+        """
+        self.stream = stream
+
+    def startTest(self, test):
+        """Maybe output an id # before the test name.
+
+        Example output::
+
+          #1 test.test ... ok
+          #2 test.test_two ... ok
+
+        """
+        adr = test.address()
+        log.debug('start test %s (%s)', adr, adr in self.tests)
+        if adr in self.tests:
+            if adr in self._seen:
+                # repeated run of an already-printed id: pad instead
+                self.write('   ')
+            else:
+                self.write('#%s ' % self.tests[adr])
+                self._seen[adr] = 1
+            return
+        # unseen test: assign the next id
+        self.tests[adr] = self.id
+        self.write('#%s ' % self.id)
+        self.id += 1
+
+    def afterTest(self, test):
+        # None means test never ran, False means failed/err
+        if test.passed is False:
+            try:
+                key = str(self.tests[test.address()])
+            except KeyError:
+                # never saw this test -- startTest didn't run
+                pass
+            else:
+                if key not in self.failed:
+                    self.failed.append(key)
+
+    def tr(self, name):
+        """Translate a '#N' (or plain 'N') id into its test name, or
+        return the name unchanged when it is not a known id."""
+        log.debug("tr '%s'", name)
+        try:
+            key = int(name.replace('#', ''))
+        except ValueError:
+            return name
+        log.debug("Got key %s", key)
+        # I'm running tests mapped from the ids file,
+        # not collecting new ones
+        if key in self.ids:
+            return self.makeName(self.ids[key])
+        return name
+
+    def write(self, output):
+        # id prefixes are only written in verbose mode (see configure)
+        if self._write_hashes:
+            self.stream.write(output)
diff --git a/nose/plugins/xunit.py b/nose/plugins/xunit.py
new file mode 100644 (file)
index 0000000..ded973e
--- /dev/null
@@ -0,0 +1,253 @@
+"""This plugin provides test results in the standard XUnit XML format.
+
+It's designed for the `Jenkins`_ (previously Hudson) continuous build
+system, but will probably work for anything else that understands an
+XUnit-formatted XML representation of test results.
+
+Add this shell command to your builder ::
+
+    nosetests --with-xunit
+
+And by default a file named nosetests.xml will be written to the
+working directory.
+
+In a Jenkins builder, tick the box named "Publish JUnit test result report"
+under the Post-build Actions and enter this value for Test report XMLs::
+
+    **/nosetests.xml
+
+If you need to change the name or location of the file, you can set the
+``--xunit-file`` option.
+
+Here is an abbreviated version of what an XML test report might look like::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
+        <testcase classname="path_to_test_suite.TestSomething"
+                  name="test_it" time="0">
+            <error type="exceptions.TypeError" message="oops, wrong type">
+            Traceback (most recent call last):
+            ...
+            TypeError: oops, wrong type
+            </error>
+        </testcase>
+    </testsuite>
+
+.. _Jenkins: http://jenkins-ci.org/
+
+"""
+import codecs
+import doctest
+import os
+import traceback
+import re
+import inspect
+from time import time
+from xml.sax import saxutils
+
+from nose.plugins.base import Plugin
+from nose.exc import SkipTest
+from nose.pyversion import UNICODE_STRINGS
+
+# Invalid XML characters, control characters 0-31 sans \t, \n and \r
+CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def xml_safe(value):
+    """Replaces invalid XML characters with '?'."""
+    # CONTROL_CHARACTERS matches control chars 0-31 except \t, \n and \r.
+    return CONTROL_CHARACTERS.sub('?', value)
+
+def escape_cdata(cdata):
+    """Escape a string for an XML CDATA section."""
+    # A literal ']]>' would terminate the section early: close the section,
+    # emit an entity-escaped ']]>' outside it, then reopen a new CDATA block.
+    return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
+
+def id_split(idval):
+    """Split a test id into [head, tail] around the final dot.
+
+    Trailing '(...)' call arguments, if present, stay attached to tail.
+    """
+    # NOTE(review): rsplit(".", 1) assumes the id contains at least one '.'.
+    m = TEST_ID.match(idval)
+    if m:
+        name, fargs = m.groups()
+        head, tail = name.rsplit(".", 1)
+        return [head, tail+fargs]
+    else:
+        return idval.rsplit(".", 1)
+
+def nice_classname(obj):
+    """Returns a nice name for class object or class instance.
+
+        >>> nice_classname(Exception()) # doctest: +ELLIPSIS
+        '...Exception'
+        >>> nice_classname(Exception) # doctest: +ELLIPSIS
+        '...Exception'
+
+    """
+    if inspect.isclass(obj):
+        cls_name = obj.__name__
+    else:
+        cls_name = obj.__class__.__name__
+    # Prefix with the defining module's name when it can be determined;
+    # inspect.getmodule may return None.
+    mod = inspect.getmodule(obj)
+    if mod:
+        name = mod.__name__
+        # jython
+        if name.startswith('org.python.core.'):
+            name = name[len('org.python.core.'):]
+        return "%s.%s" % (name, cls_name)
+    else:
+        return cls_name
+
+def exc_message(exc_info):
+    """Return the exception's message."""
+    exc = exc_info[1]
+    if exc is None:
+        # str exception
+        result = exc_info[0]
+    else:
+        try:
+            result = str(exc)
+        except UnicodeEncodeError:
+            try:
+                result = unicode(exc)
+            except UnicodeError:
+                # Fallback to args as neither str nor
+                # unicode(Exception(u'\xe6')) work in Python < 2.6
+                result = exc.args[0]
+    # Strip XML-invalid control characters before returning.
+    return xml_safe(result)
+
+class Xunit(Plugin):
+    """This plugin provides test results in the standard XUnit XML format."""
+    name = 'xunit'
+    # High plugin score so these hooks run ahead of lower-scored plugins.
+    score = 2000
+    encoding = 'UTF-8'
+    # Handle for the XML report; opened in configure() when enabled.
+    error_report_file = None
+
+    def _timeTaken(self):
+        # Wall-clock seconds since startTest() set the timer.
+        if hasattr(self, '_timer'):
+            taken = time() - self._timer
+        else:
+            # test died before it ran (probably error in setup())
+            # or success/failure added before test started probably
+            # due to custom TestResult munging
+            taken = 0.0
+        return taken
+
+    def _quoteattr(self, attr):
+        """Escape an XML attribute. Value can be unicode."""
+        attr = xml_safe(attr)
+        if isinstance(attr, unicode) and not UNICODE_STRINGS:
+            # Python 2: encode to the report encoding before quoting.
+            attr = attr.encode(self.encoding)
+        return saxutils.quoteattr(attr)
+
+    def options(self, parser, env):
+        """Sets additional command line options."""
+        Plugin.options(self, parser, env)
+        parser.add_option(
+            '--xunit-file', action='store',
+            dest='xunit_file', metavar="FILE",
+            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
+            help=("Path to xml file to store the xunit report in. "
+                  "Default is nosetests.xml in the working directory "
+                  "[NOSE_XUNIT_FILE]"))
+
+    def configure(self, options, config):
+        """Configures the xunit plugin."""
+        Plugin.configure(self, options, config)
+        self.config = config
+        if self.enabled:
+            self.stats = {'errors': 0,
+                          'failures': 0,
+                          'passes': 0,
+                          'skipped': 0
+                          }
+            self.errorlist = []
+            # Open the report file up front; 'replace' keeps odd characters
+            # in test output from raising encoding errors at write time.
+            self.error_report_file = codecs.open(options.xunit_file, 'w',
+                                                 self.encoding, 'replace')
+
+    def report(self, stream):
+        """Writes an Xunit-formatted XML file
+
+        The file includes a report of test errors and failures.
+
+        """
+        self.stats['encoding'] = self.encoding
+        self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+                               + self.stats['passes'] + self.stats['skipped'])
+        self.error_report_file.write(
+            u'<?xml version="1.0" encoding="%(encoding)s"?>'
+            u'<testsuite name="nosetests" tests="%(total)d" '
+            u'errors="%(errors)d" failures="%(failures)d" '
+            u'skip="%(skipped)d">' % self.stats)
+        self.error_report_file.write(u''.join([self._forceUnicode(e)
+                                               for e in self.errorlist]))
+        self.error_report_file.write(u'</testsuite>')
+        self.error_report_file.close()
+        if self.config.verbosity > 1:
+            stream.writeln("-" * 70)
+            stream.writeln("XML: %s" % self.error_report_file.name)
+
+    def startTest(self, test):
+        """Initializes a timer before starting a test."""
+        self._timer = time()
+
+    def addError(self, test, err, capt=None):
+        """Add error output to Xunit report.
+        """
+        taken = self._timeTaken()
+
+        # Skips arrive via the error hook but are counted separately.
+        if issubclass(err[0], SkipTest):
+            type = 'skipped'
+            self.stats['skipped'] += 1
+        else:
+            type = 'error'
+            self.stats['errors'] += 1
+        tb = ''.join(traceback.format_exception(*err))
+        id = test.id()
+        # NOTE(review): locals 'type' and 'id' shadow the builtins here.
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+            '<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+            '</%(type)s></testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'type': type,
+             'errtype': self._quoteattr(nice_classname(err[0])),
+             'message': self._quoteattr(exc_message(err)),
+             'tb': escape_cdata(tb),
+             })
+
+    def addFailure(self, test, err, capt=None, tb_info=None):
+        """Add failure output to Xunit report.
+        """
+        taken = self._timeTaken()
+        tb = ''.join(traceback.format_exception(*err))
+        self.stats['failures'] += 1
+        id = test.id()
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+            '<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+            '</failure></testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'errtype': self._quoteattr(nice_classname(err[0])),
+             'message': self._quoteattr(exc_message(err)),
+             'tb': escape_cdata(tb),
+             })
+
+    def addSuccess(self, test, capt=None):
+        """Add success output to Xunit report.
+        """
+        taken = self._timeTaken()
+        self.stats['passes'] += 1
+        id = test.id()
+        # Successful tests get a self-closing testcase element.
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s '
+            'time="%(taken).3f" />' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             })
+
+    def _forceUnicode(self, s):
+        # Python 2 only: decode byte strings so report() can join as unicode.
+        if not UNICODE_STRINGS:
+            if isinstance(s, str):
+                s = s.decode(self.encoding, 'replace')
+        return s
diff --git a/nose/proxy.py b/nose/proxy.py
new file mode 100644 (file)
index 0000000..8723290
--- /dev/null
@@ -0,0 +1,191 @@
+"""
+Result Proxy
+------------
+
+The result proxy wraps the result instance given to each test. It
+performs two functions: enabling extended error/failure reporting
+and calling plugins.
+
+As each result event is fired, plugins are called with the same event;
+however, plugins are called with the nose.case.Test instance that
+wraps the actual test. So when a test fails and calls
+result.addFailure(self, err), the result proxy calls
+addFailure(self.test, err) for each plugin. This allows plugins to
+have a single stable interface for all test types, and also to
+manipulate the test object itself by setting the `test` attribute of
+the nose.case.Test that they receive.
+"""
+import logging
+from nose.config import Config
+
+
+log = logging.getLogger(__name__)
+
+
+def proxied_attribute(local_attr, proxied_attr, doc):
+    """Create a property that proxies attribute ``proxied_attr`` through
+    the local attribute ``local_attr``.
+    """
+    def fget(self):
+        return getattr(getattr(self, local_attr), proxied_attr)
+    def fset(self, value):
+        setattr(getattr(self, local_attr), proxied_attr, value)
+    def fdel(self):
+        delattr(getattr(self, local_attr), proxied_attr)
+    # Read, write and delete all pass straight through to the proxied object.
+    return property(fget, fset, fdel, doc)
+
+
+class ResultProxyFactory(object):
+    """Factory for result proxies. Generates a ResultProxy bound to each test
+    and the result passed to the test.
+    """
+    def __init__(self, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        # __prepared guards the one-time prepareTestResult plugin call;
+        # __result holds a plugin-substituted result, if any.
+        self.__prepared = False
+        self.__result = None
+
+    def __call__(self, result, test):
+        """Return a ResultProxy for the current test.
+
+        On first call, plugins are given a chance to replace the
+        result used for the remaining tests. If a plugin returns a
+        value from prepareTestResult, that object will be used as the
+        result for all tests.
+        """
+        if not self.__prepared:
+            self.__prepared = True
+            plug_result = self.config.plugins.prepareTestResult(result)
+            if plug_result is not None:
+                self.__result = result = plug_result
+        if self.__result is not None:
+            # A plugin-provided result overrides the one passed in.
+            result = self.__result
+        return ResultProxy(result, test, config=self.config)
+
+
+class ResultProxy(object):
+    """Proxy to TestResults (or other results handler).
+
+    One ResultProxy is created for each nose.case.Test. The result
+    proxy calls plugins with the nose.case.Test instance (instead of
+    the wrapped test case) as each result call is made. Finally, the
+    real result method is called, also with the nose.case.Test
+    instance as the test parameter.
+
+    """
+    def __init__(self, result, test, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        self.plugins = config.plugins
+        self.result = result
+        self.test = test
+
+    def __repr__(self):
+        return repr(self.result)
+
+    def _prepareErr(self, err):
+        """Return an (etype, value, tb) triple whose value is an Exception."""
+        if not isinstance(err[1], Exception):
+            # Turn value back into an Exception (required in Python 3.x).
+            # Plugins do all sorts of crazy things with exception values.
+            try:
+                # The actual exception class is needed for failure detail
+                # but maybe other plugins?
+                value = err[0](err[1])
+            except:
+                # Constructor may reject the value; wrap it generically.
+                value = Exception(err[1])
+            err = (err[0], value, err[2])
+        return err
+
+    def assertMyTest(self, test):
+        # The test I was called with must be my .test or my
+        # .test's .test. or my .test.test's .case
+
+        case = getattr(self.test, 'test', None)
+        assert (test is self.test
+                or test is case
+                or test is getattr(case, '_nose_case', None)), (
+                "ResultProxy for %r (%s) was called with test %r (%s)"
+                % (self.test, id(self.test), test, id(test)))
+
+    def afterTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.afterTest(self.test)
+        # Not every result implementation provides this hook.
+        if hasattr(self.result, "afterTest"):
+            self.result.afterTest(self.test)
+
+    def beforeTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.beforeTest(self.test)
+        if hasattr(self.result, "beforeTest"):
+            self.result.beforeTest(self.test)
+
+    def addError(self, test, err):
+        self.assertMyTest(test)
+        plugins = self.plugins
+        # A plugin may claim the error entirely, suppressing normal handling.
+        plugin_handled = plugins.handleError(self.test, err)
+        if plugin_handled:
+            return
+        # test.passed is set in result, to account for error classes
+        formatted = plugins.formatError(self.test, err)
+        if formatted is not None:
+            err = formatted
+        plugins.addError(self.test, err)
+        self.result.addError(self.test, self._prepareErr(err))
+        # NOTE(review): unlike addFailure below, stop is conditional on the
+        # run actually failing -- errors in non-failing error classes (e.g.
+        # skips) leave wasSuccessful() true. Looks intentional; confirm.
+        if not self.result.wasSuccessful() and self.config.stopOnError:
+            self.shouldStop = True
+
+    def addFailure(self, test, err):
+        self.assertMyTest(test)
+        plugins = self.plugins
+        plugin_handled = plugins.handleFailure(self.test, err)
+        if plugin_handled:
+            return
+        self.test.passed = False
+        formatted = plugins.formatFailure(self.test, err)
+        if formatted is not None:
+            err = formatted
+        plugins.addFailure(self.test, err)
+        self.result.addFailure(self.test, self._prepareErr(err))
+        if self.config.stopOnError:
+            self.shouldStop = True
+
+    def addSkip(self, test, reason):
+        # 2.7 compat shim
+        from nose.plugins.skip import SkipTest
+        self.assertMyTest(test)
+        plugins = self.plugins
+        if not isinstance(reason, Exception):
+            # for Python 3.2+
+            reason = Exception(reason)
+        # Plugins observe skips through their error hook as a SkipTest triple.
+        plugins.addError(self.test, (SkipTest, reason, None))
+        self.result.addSkip(self.test, reason)
+
+    def addSuccess(self, test):
+        self.assertMyTest(test)
+        self.plugins.addSuccess(self.test)
+        self.result.addSuccess(self.test)
+
+    def startTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.startTest(self.test)
+        self.result.startTest(self.test)
+
+    def stop(self):
+        self.result.stop()
+
+    def stopTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.stopTest(self.test)
+        self.result.stopTest(self.test)
+
+    # proxied attributes
+    shouldStop = proxied_attribute('result', 'shouldStop',
+                                    """Should the test run stop?""")
+    errors = proxied_attribute('result', 'errors',
+                               """Tests that raised an exception""")
+    failures = proxied_attribute('result', 'failures',
+                                 """Tests that failed""")
+    testsRun = proxied_attribute('result', 'testsRun',
+                                 """Number of tests run""")
diff --git a/nose/pyversion.py b/nose/pyversion.py
new file mode 100644 (file)
index 0000000..36ddcfd
--- /dev/null
@@ -0,0 +1,130 @@
+"""
+This module contains fixups for using nose under different versions of Python.
+"""
+import sys
+import os
+import types
+import inspect
+import nose.util
+
+__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType',
+           'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod',
+           'bytes_']
+
+# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x
+# source will be replaced with 'str()' when running 2to3, so this test will
+# then become true)
+UNICODE_STRINGS = (type(unicode()) == type(str()))
+
+# new.instancemethod() is obsolete for new-style classes (Python 3.x)
+# We need to use descriptor methods instead.
+try:
+    import new
+    def make_instancemethod(function, instance):
+        # Python 2: rebind the underlying function to the given instance.
+        return new.instancemethod(function.im_func, instance,
+                                  instance.__class__)
+except ImportError:
+    def make_instancemethod(function, instance):
+        # Python 3: functions are descriptors; __get__ yields a bound method.
+        return function.__get__(instance, instance.__class__)
+
+# To be forward-compatible, we do all list sorts using keys instead of cmp
+# functions.  However, part of the unittest.TestLoader API involves a
+# user-provideable cmp function, so we need some way to convert that.
+def cmp_to_key(mycmp):
+    'Convert a cmp= function into a key= function'
+    class Key(object):
+        def __init__(self, obj):
+            self.obj = obj
+        # Defines only the comparisons sorting relies on (<, >, ==).
+        def __lt__(self, other):
+            return mycmp(self.obj, other.obj) < 0
+        def __gt__(self, other):
+            return mycmp(self.obj, other.obj) > 0
+        def __eq__(self, other):
+            return mycmp(self.obj, other.obj) == 0
+    return Key
+
+# Python 2.3 also does not support list-sorting by key, so we need to convert
+# keys to cmp functions if we're running on old Python..
+if sys.version_info < (2, 4):
+    def sort_list(l, key, reverse=False):
+        # Python 2.3: list.sort() has no key=; emulate with a cmp function.
+        if reverse:
+            return l.sort(lambda a, b: cmp(key(b), key(a)))
+        else:
+            return l.sort(lambda a, b: cmp(key(a), key(b)))
+else:
+    def sort_list(l, key, reverse=False):
+        # Sorts in place; returns None, matching list.sort().
+        return l.sort(key=key, reverse=reverse)
+
+# In Python 3.x, all objects are "new style" objects descended from 'type', and
+# thus types.ClassType and types.TypeType don't exist anymore.  For
+# compatibility, we make sure they still work.
+if hasattr(types, 'ClassType'):
+    ClassType = types.ClassType
+    TypeType = types.TypeType
+else:
+    ClassType = type
+    TypeType = type
+
+# The following emulates the behavior (we need) of an 'unbound method' under
+# Python 3.x (namely, the ability to have a class associated with a function
+# definition so that things can do stuff based on its associated class)
+class UnboundMethod:
+    """Wrap a function so it stays associated with its defining class,
+    emulating a Python 2 unbound method under Python 3.
+    """
+    def __init__(self, cls, func):
+        # Make sure we have all the same attributes as the original function,
+        # so that the AttributeSelector plugin will work correctly...
+        self.__dict__ = func.__dict__.copy()
+        self._func = func
+        self.__self__ = UnboundSelf(cls)
+
+    def address(self):
+        """Return a (filename, module name, 'Class.func') address tuple."""
+        cls = self.__self__.cls
+        modname = cls.__module__
+        module = sys.modules[modname]
+        filename = getattr(module, '__file__', None)
+        if filename is not None:
+            filename = os.path.abspath(filename)
+        return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__,
+                                                        self._func.__name__))
+
+    def __call__(self, *args, **kwargs):
+        return self._func(*args, **kwargs)
+
+    def __getattr__(self, attr):
+        # Delegate any other attribute access to the wrapped function.
+        return getattr(self._func, attr)
+
+    def __repr__(self):
+        return '<unbound method %s.%s>' % (self.__self__.cls.__name__,
+                                           self._func.__name__)
+
+class UnboundSelf:
+    """Stand-in 'self' whose __class__ reports the wrapped class."""
+    def __init__(self, cls):
+        self.cls = cls
+
+    # We have to do this hackery because Python won't let us override the
+    # __class__ attribute...
+    def __getattribute__(self, attr):
+        if attr == '__class__':
+            return self.cls
+        else:
+            return object.__getattribute__(self, attr)
+
+def unbound_method(cls, func):
+    """Associate func with cls; already-bound methods pass through as-is.
+
+    Raises TypeError when func is not a plain function.
+    """
+    if inspect.ismethod(func):
+        return func
+    if not inspect.isfunction(func):
+        raise TypeError('%s is not a function' % (repr(func),))
+    return UnboundMethod(cls, func)
+
+def ismethod(obj):
+    """Like inspect.ismethod, but also true for UnboundMethod wrappers."""
+    return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
+
+
+# Make a pseudo-bytes function that can be called without the encoding arg:
+if sys.version_info >= (3, 0):
+    def bytes_(s, encoding='utf8'):
+        # Already bytes: return unchanged to avoid double-encoding.
+        if isinstance(s, bytes):
+            return s
+        return bytes(s, encoding)
+else:
+    def bytes_(s, encoding=None):
+        # Python 2: str is the byte type; encoding is accepted but unused.
+        return str(s)
diff --git a/nose/result.py b/nose/result.py
new file mode 100644 (file)
index 0000000..1267ba2
--- /dev/null
@@ -0,0 +1,200 @@
+"""
+Test Result
+-----------
+
+Provides a TextTestResult that extends unittest's _TextTestResult to
+provide support for error classes (such as the builtin skip and
+deprecated classes), and hooks for plugins to take over or extend
+reporting.
+"""
+
+import logging
+try:
+    # 2.7+
+    from unittest.runner import _TextTestResult
+except ImportError:
+    from unittest import _TextTestResult
+from nose.config import Config
+from nose.util import isclass, ln as _ln # backwards compat
+
+log = logging.getLogger('nose.result')
+
+
+def _exception_detail(exc):
+    """Return str(exc), guarding against exceptions whose __str__ raises."""
+    # this is what stdlib module traceback does
+    try:
+        return str(exc)
+    except:
+        return '<unprintable %s object>' % type(exc).__name__
+
+
+class TextTestResult(_TextTestResult):
+    """Text test result that extends unittest's default test result
+    support for a configurable set of errorClasses (eg, Skip,
+    Deprecated, TODO) that extend the errors/failures/success triad.
+    """
+    def __init__(self, stream, descriptions, verbosity, config=None,
+                 errorClasses=None):
+        # errorClasses maps exception class -> (storage list, label, isfail).
+        if errorClasses is None:
+            errorClasses = {}
+        self.errorClasses = errorClasses
+        if config is None:
+            config = Config()
+        self.config = config
+        _TextTestResult.__init__(self, stream, descriptions, verbosity)
+
+    def addSkip(self, test, reason):
+        # 2.7 skip compat
+        from nose.plugins.skip import SkipTest
+        # Only recorded when SkipTest was registered as an error class;
+        # otherwise the skip is silently dropped here.
+        if SkipTest in self.errorClasses:
+            storage, label, isfail = self.errorClasses[SkipTest]
+            storage.append((test, reason))
+            self.printLabel(label, (SkipTest, reason, None))
+
+    def addError(self, test, err):
+        """Overrides normal addError to add support for
+        errorClasses. If the exception is a registered class, the
+        error will be added to the list for that class, not errors.
+        """
+        ec, ev, tb = err
+        try:
+            exc_info = self._exc_info_to_string(err, test)
+        except TypeError:
+            # 2.3 compat
+            exc_info = self._exc_info_to_string(err)
+        for cls, (storage, label, isfail) in self.errorClasses.items():
+            #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
+            #    from nose.tools import set_trace
+            #    set_trace()
+            if isclass(ec) and issubclass(ec, cls):
+                if isfail:
+                    test.passed = False
+                storage.append((test, exc_info))
+                self.printLabel(label, err)
+                return
+        # Not a registered error class: record as an ordinary error.
+        self.errors.append((test, exc_info))
+        test.passed = False
+        self.printLabel('ERROR')
+
+    # override to bypass changes in 2.7
+    def getDescription(self, test):
+        if self.descriptions:
+            return test.shortDescription() or str(test)
+        else:
+            return str(test)
+
+    def printLabel(self, label, err=None):
+        # Might get patched into a streamless result
+        stream = getattr(self, 'stream', None)
+        if stream is not None:
+            if self.showAll:
+                message = [label]
+                if err:
+                    detail = _exception_detail(err[1])
+                    if detail:
+                        message.append(detail)
+                stream.writeln(": ".join(message))
+            elif self.dots:
+                # Dot mode: emit just the label's first character.
+                stream.write(label[:1])
+
+    def printErrors(self):
+        """Overrides to print all errorClasses errors as well.
+        """
+        _TextTestResult.printErrors(self)
+        for cls in self.errorClasses.keys():
+            storage, label, isfail = self.errorClasses[cls]
+            if isfail:
+                self.printErrorList(label, storage)
+        # Might get patched into a result with no config
+        if hasattr(self, 'config'):
+            self.config.plugins.report(self.stream)
+
+    def printSummary(self, start, stop):
+        """Called by the test runner to print the final summary of test
+        run results.
+        """
+        write = self.stream.write
+        writeln = self.stream.writeln
+        taken = float(stop - start)
+        run = self.testsRun
+        plural = run != 1 and "s" or ""
+
+        writeln(self.separator2)
+        writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
+        writeln()
+
+        # Build label -> count for every non-empty result category.
+        summary = {}
+        eckeys = self.errorClasses.keys()
+        for cls in eckeys:
+            storage, label, isfail = self.errorClasses[cls]
+            count = len(storage)
+            if not count:
+                continue
+            summary[label] = count
+        if len(self.failures):
+            summary['failures'] = len(self.failures)
+        if len(self.errors):
+            summary['errors'] = len(self.errors)
+
+        if not self.wasSuccessful():
+            write("FAILED")
+        else:
+            write("OK")
+        # Python 2: items() returns a list, which sort() mutates in place.
+        items = summary.items()
+        if items:
+            items.sort()
+            write(" (")
+            write(", ".join(["%s=%s" % (label, count) for
+                             label, count in items]))
+            writeln(")")
+        else:
+            writeln()
+
+    def wasSuccessful(self):
+        """Overrides to check that there are no errors in errorClasses
+        lists that are marked as errors and should cause a run to
+        fail.
+        """
+        if self.errors or self.failures:
+            return False
+        for cls in self.errorClasses.keys():
+            storage, label, isfail = self.errorClasses[cls]
+            if not isfail:
+                continue
+            if storage:
+                return False
+        return True
+
+    def _addError(self, test, err):
+        # Minimal error recording used internally; bypasses errorClasses.
+        try:
+            exc_info = self._exc_info_to_string(err, test)
+        except TypeError:
+            # 2.3: does not take test arg
+            exc_info = self._exc_info_to_string(err)
+        self.errors.append((test, exc_info))
+        if self.showAll:
+            self.stream.write('ERROR')
+        elif self.dots:
+            self.stream.write('E')
+
+    def _exc_info_to_string(self, err, test=None):
+        # 2.7 skip compat
+        from nose.plugins.skip import SkipTest
+        # Skips report just the reason, not a full traceback.
+        if issubclass(err[0], SkipTest):
+            return str(err[1])
+        # 2.3/2.4 -- 2.4 passes test, 2.3 does not
+        try:
+            return _TextTestResult._exc_info_to_string(self, err, test)
+        except TypeError:
+            # 2.3: does not take test arg
+            return _TextTestResult._exc_info_to_string(self, err)
+
+
+def ln(*arg, **kw):
+    """Deprecated alias; forwards to nose.util.ln with a warning."""
+    from warnings import warn
+    warn("ln() has moved to nose.util from nose.result and will be removed "
+         "from nose.result in a future release. Please update your imports ",
+         DeprecationWarning)
+    return _ln(*arg, **kw)
+
+
diff --git a/nose/selector.py b/nose/selector.py
new file mode 100644 (file)
index 0000000..c4a006a
--- /dev/null
@@ -0,0 +1,251 @@
+"""
+Test Selection
+--------------
+
+Test selection is handled by a Selector. The test loader calls the
+appropriate selector method for each object it encounters that it
+thinks may be a test.
+"""
+import logging
+import os
+import unittest
+from nose.config import Config
+from nose.util import split_test_name, src, getfilename, getpackage, ispackage
+
+log = logging.getLogger(__name__)
+
+__all__ = ['Selector', 'defaultSelector', 'TestAddress']
+
+
# for efficiency and easier mocking: bind the os.path helpers to
# module-level names once at import time, so hot selection paths avoid
# repeated attribute lookups and tests can patch these names directly
op_join = os.path.join
op_basename = os.path.basename
op_exists = os.path.exists
op_splitext = os.path.splitext
op_isabs = os.path.isabs
op_abspath = os.path.abspath
+
+
class Selector(object):
    """Core test selector. Examines test candidates and determines whether,
    given the specified configuration, the test candidate should be selected
    as a test.

    Plugins are always given the final word: any non-None plugin answer
    overrides the selector's own decision.
    """
    def __init__(self, config):
        """Initialize from ``config`` (a nose.config.Config); a default
        Config is constructed when None is passed."""
        if config is None:
            config = Config()
        self.configure(config)

    def configure(self, config):
        # Cache config attributes used repeatedly in the want* methods.
        self.config = config
        self.exclude = config.exclude
        self.ignoreFiles = config.ignoreFiles
        self.include = config.include
        self.plugins = config.plugins
        self.match = config.testMatch

    def matches(self, name):
        """Does the name match my requirements?

        To match, a name must match config.testMatch OR config.include
        and it must not match config.exclude
        """
        # NOTE: any() replaces the historical ``filter(None, [...])``
        # truthiness test, which is Python-2-only: under Python 3,
        # filter() returns a lazy object that is always truthy, which
        # would make exclude patterns ineffective. Behavior on Python 2
        # is unchanged.
        return ((self.match.search(name)
                 or (self.include
                     and any(inc.search(name) for inc in self.include)))
                and (not self.exclude
                     or not any(exc.search(name)
                                for exc in self.exclude)))

    def wantClass(self, cls):
        """Is the class a wanted test class?

        A class must be a unittest.TestCase subclass, or match test name
        requirements. Classes that start with _ are always excluded.
        """
        # An explicit __test__ attribute always wins over name matching.
        declared = getattr(cls, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = (not cls.__name__.startswith('_')
                      and (issubclass(cls, unittest.TestCase)
                           or self.matches(cls.__name__)))

        plug_wants = self.plugins.wantClass(cls)
        if plug_wants is not None:
            log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
            wanted = plug_wants
        log.debug("wantClass %s? %s", cls, wanted)
        return wanted

    def wantDirectory(self, dirname):
        """Is the directory a wanted test directory?

        All package directories match, so long as they do not match exclude. 
        All other directories must match test requirements.
        """
        tail = op_basename(dirname)
        if ispackage(dirname):
            # Packages are wanted unless excluded by name.
            wanted = (not self.exclude
                      or not any(exc.search(tail) for exc in self.exclude))
        else:
            # Non-packages must look like tests, or be a configured
            # source directory.
            wanted = (self.matches(tail)
                      or (self.config.srcDirs
                          and tail in self.config.srcDirs))
        plug_wants = self.plugins.wantDirectory(dirname)
        if plug_wants is not None:
            log.debug("Plugin setting selection of %s to %s",
                      dirname, plug_wants)
            wanted = plug_wants
        log.debug("wantDirectory %s? %s", dirname, wanted)
        return wanted

    def wantFile(self, file):
        """Is the file a wanted test file?

        The file must be a python source file and match testMatch or
        include, and not match exclude. Files that match ignore are *never*
        wanted, regardless of plugin, testMatch, include or exclude settings.
        """
        # never, ever load files that match anything in ignore
        # (.* _* and *setup*.py by default)
        base = op_basename(file)
        if any(ignore.search(base) for ignore in self.ignoreFiles):
            log.debug('%s matches ignoreFiles pattern; skipped',
                      base)
            return False
        if not self.config.includeExe and os.access(file, os.X_OK):
            log.info('%s is executable; skipped', file)
            return False
        dummy, ext = op_splitext(base)
        pysrc = ext == '.py'

        wanted = pysrc and self.matches(base)
        plug_wants = self.plugins.wantFile(file)
        if plug_wants is not None:
            log.debug("plugin setting want %s to %s", file, plug_wants)
            wanted = plug_wants
        log.debug("wantFile %s? %s", file, wanted)
        return wanted

    def wantFunction(self, function):
        """Is the function a test function?

        Matches by name (compat_func_name when present), excludes names
        starting with _, and defers to __test__ and plugins as usual.
        """
        try:
            if hasattr(function, 'compat_func_name'):
                funcname = function.compat_func_name
            else:
                funcname = function.__name__
        except AttributeError:
            # not a function
            return False
        declared = getattr(function, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = not funcname.startswith('_') and self.matches(funcname)
        plug_wants = self.plugins.wantFunction(function)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantFunction %s? %s", function, wanted)
        return wanted

    def wantMethod(self, method):
        """Is the method a test method?

        'Private' (underscore-prefixed) methods are never collected;
        otherwise __test__, name matching and plugins decide.
        """
        try:
            method_name = method.__name__
        except AttributeError:
            # not a method
            return False
        if method_name.startswith('_'):
            # never collect 'private' methods
            return False
        declared = getattr(method, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = self.matches(method_name)
        plug_wants = self.plugins.wantMethod(method)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantMethod %s? %s", method, wanted)
        return wanted

    def wantModule(self, module):
        """Is the module a test module?

        The tail of the module name must match test requirements. One exception:
        we always want __main__.
        """
        declared = getattr(module, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = self.matches(module.__name__.split('.')[-1]) \
                     or module.__name__ == '__main__'
        plug_wants = self.plugins.wantModule(module)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantModule %s? %s", module, wanted)
        return wanted
+        
# Default selector class used when no custom selector is supplied.
defaultSelector = Selector
+
+
class TestAddress(object):
    """A test address represents a user's request to run a particular
    test. The user may specify a filename or module (or neither),
    and/or a callable (a class, function, or method). The naming
    format for test addresses is:

    filename_or_module:callable

    Filenames that are not absolute will be made absolute relative to
    the working dir.

    The filename or module part will be considered a module name if it
    doesn't look like a file, that is, if it doesn't exist on the file
    system and it doesn't contain any directory separators and it
    doesn't end in .py.

    Callables may be a class name, function name, method name, or
    class.method specification.
    """
    def __init__(self, name, workingDir=None):
        if workingDir is None:
            workingDir = os.getcwd()
        self.name = name
        self.workingDir = workingDir
        # Split "filename_or_module:callable" into its three parts.
        self.filename, self.module, self.call = split_test_name(name)
        log.debug('Test name %s resolved to file %s, module %s, call %s',
                  name, self.filename, self.module, self.call)
        if self.filename is None and self.module is not None:
            # Only a module was given; locate its source file.
            self.filename = getfilename(self.module, self.workingDir)
        if self.filename:
            # Normalize to the .py source and make the path absolute.
            filename = src(self.filename)
            if not op_isabs(filename):
                filename = op_abspath(op_join(workingDir, filename))
            self.filename = filename
            if self.module is None:
                self.module = getpackage(filename)
        log.debug(
            'Final resolution of test name %s: file %s module %s call %s',
            name, self.filename, self.module, self.call)

    def totuple(self):
        """Return the address as a (filename, module, call) tuple."""
        return (self.filename, self.module, self.call)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "%s: (%s, %s, %s)" % ((self.name,) + self.totuple())
diff --git a/nose/sphinx/__init__.py b/nose/sphinx/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/nose/sphinx/pluginopts.py b/nose/sphinx/pluginopts.py
new file mode 100644 (file)
index 0000000..9b88eeb
--- /dev/null
@@ -0,0 +1,186 @@
+"""
+Adds a sphinx directive that can be used to automatically document a plugin.
+
+this::
+
+ .. autoplugin :: nose.plugins.foo
+    :plugin: Pluggy
+    
+produces::
+
+  .. automodule :: nose.plugins.foo
+  
+  Options
+  -------
+
+  .. cmdoption :: --foo=BAR, --fooble=BAR
+
+    Do the foo thing to the new thing.
+
+  Plugin
+  ------
+
+  .. autoclass :: nose.plugins.foo.Pluggy
+     :members:
+
+  Source
+  ------
+
+  .. include :: path/to/nose/plugins/foo.py
+     :literal:
+
+"""
+import os
+try:
+    from docutils import nodes
+    from docutils.statemachine import ViewList
+    from docutils.parsers.rst import directives
+except ImportError:
+    pass # won't run anyway
+
+from nose.util import resolve_name
+from nose.plugins.base import Plugin
+from nose.plugins.manager import BuiltinPluginManager
+from nose.config import Config
+from nose.core import TestProgram
+from inspect import isclass
+
def autoplugin_directive(dirname, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    """Implementation of the ``autoplugin`` rst directive.

    Builds an rst fragment documenting the plugin module named by the
    directive argument -- module docstring, command-line options,
    plugin class and literal source -- and parses it into the current
    document, returning the resulting nodes.
    """
    mod_name = arguments[0]
    mod = resolve_name(mod_name)
    plug_name = options.get('plugin', None)
    if plug_name:
        obj = getattr(mod, plug_name)
    else:
        # No :plugin: option given: use the first Plugin subclass found
        # among the module's attributes.
        for entry in dir(mod):
            obj = getattr(mod, entry)
            if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin:
                plug_name = '%s.%s' % (mod_name, entry)
                break

    # mod docstring
    rst = ViewList()
    rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
    rst.append('', '<autodoc>')

    # options
    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')

    # Instantiate the plugin and collect its option definitions in an
    # OptBucket, which records them instead of building a real parser.
    plug = obj()
    opts = OptBucket()
    plug.options(opts, {})
    for opt in opts:
        rst.append(opt.options(), '<autodoc>')
        rst.append('   \n', '<autodoc>')
        rst.append('   ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')

    # plugin class
    rst.append('Plugin', '<autodoc>')
    rst.append('------', '<autodoc>')
    rst.append('', '<autodoc>')

    rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
    rst.append('   :members:\n', '<autodoc>')
    rst.append('   :show-inheritance:\n', '<autodoc>')
    rst.append('', '<autodoc>')

    # source
    rst.append('Source', '<autodoc>')
    rst.append('------', '<autodoc>')
    # Point at the .py source even when the module was loaded from .pyc.
    rst.append('.. include :: %s\n' % os.path.relpath(
            mod.__file__.replace('.pyc', '.py'), os.getcwd()),
               '<autodoc>')
    rst.append('   :literal:\n', '<autodoc>')
    rst.append('', '<autodoc>')

    # Parse the generated rst in a fresh section with reset title
    # styles so directive-local headings don't clash with the
    # surrounding document, then restore the parser state.
    node = nodes.section()
    node.document = state.document
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level

    return node.children
+
+
def autohelp_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """produces rst from nose help"""
    # Use OptBucket as the "parser" class so option definitions are
    # recorded for rendering rather than actually parsed.
    config = Config(parserClass=OptBucket,
                    plugins=BuiltinPluginManager())
    parser = config.getParser(TestProgram.usage())
    rst = ViewList()
    for line in parser.format_help().split('\n'):
        rst.append(line, '<autodoc>')

    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')
    for opt in parser:
        rst.append(opt.options(), '<autodoc>')
        rst.append('   \n', '<autodoc>')
        rst.append('   ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')
    # Parse the generated rst in an isolated section with reset title
    # styles, then restore the surrounding parser state.
    node = nodes.section()
    node.document = state.document
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level

    return node.children
+
+    
class OptBucket(object):
    """Minimal optparse-parser stand-in that records option definitions
    as Opt objects instead of building a real parser.
    """
    def __init__(self, doc=None, prog='nosetests'):
        self.opts = []
        self.doc = doc
        self.prog = prog

    def __iter__(self):
        # Iterate the recorded Opt objects.
        return iter(self.opts)

    def format_help(self):
        # Substitute the program name and promote "x:\n" headings to
        # rst literal-block markers ("x::\n").
        text = self.doc.replace('%prog', self.prog)
        return text.replace(':\n', '::\n')

    def add_option(self, *arg, **kw):
        # Same signature as optparse's add_option; just record it.
        self.opts.append(Opt(*arg, **kw))
+
+
class Opt(object):
    """Record of a single command-line option definition, keeping just
    the attributes needed to render a ``.. cmdoption ::`` entry.
    """
    def __init__(self, *arg, **kw):
        self.opts = arg
        # Pull out the optparse keywords we care about; anything else
        # in kw is silently ignored, matching optparse-style calls.
        for attr in ('action', 'default', 'metavar', 'help'):
            setattr(self, attr, kw.pop(attr, None))

    def options(self):
        """Render this option as an rst cmdoption directive line."""
        rendered = []
        for optstring in self.opts:
            if self.action in ('store_true', 'store_false'):
                # Flags take no value.
                rendered.append(optstring)
            else:
                rendered.append('%s=%s' % (optstring, self.meta(optstring)))
        return '.. cmdoption :: ' + ', '.join(rendered)

    def meta(self, optstring):
        # FIXME optparser default metavar?
        return self.metavar or 'DEFAULT'
+
+    
def setup(app):
    """Sphinx extension entry point: register the ``autoplugin``
    (one required argument, optional :plugin: option) and ``autohelp``
    (no arguments) directives with the Sphinx application."""
    app.add_directive('autoplugin',
                      autoplugin_directive, 1, (1, 0, 1),
                      plugin=directives.unchanged)
    app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
diff --git a/nose/suite.py b/nose/suite.py
new file mode 100644 (file)
index 0000000..3b68b23
--- /dev/null
@@ -0,0 +1,607 @@
+"""
+Test Suites
+-----------
+
+Provides a LazySuite, which is a suite whose test list is a generator
+function, and ContextSuite,which can run fixtures (setup/teardown
+functions or methods) for the context that contains its tests.
+
+"""
+from __future__ import generators
+
+import logging
+import sys
+import unittest
+from nose.case import Test
+from nose.config import Config
+from nose.proxy import ResultProxyFactory
+from nose.util import isclass, resolve_name, try_run
+
+if sys.platform == 'cli':
+    if sys.version_info[:2] < (2, 6):
+        import clr
+        clr.AddReference("IronPython")
+        from IronPython.Runtime.Exceptions import StringException
+    else:
+        class StringException(Exception):
+            pass
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# Singleton for default value -- see ContextSuite.__init__ below
+_def = object()
+
+
+def _strclass(cls):
+    return "%s.%s" % (cls.__module__, cls.__name__)
+
class MixedContextError(Exception):
    """Raised when a single context suite encounters tests belonging
    to more than one context.
    """
    pass
+
+
class LazySuite(unittest.TestSuite):
    """A suite that may use a generator as its list of tests.

    Tests already pulled from the generator (or added via addTest) are
    kept in a precache list; iteration yields the precache first, then
    the remainder of the generator. Because of that, iterating the
    suite may not be repeatable.
    """
    def __init__(self, tests=()):
        """Initialize the suite. tests may be an iterable or a generator
        """
        self._set_tests(tests)

    def __iter__(self):
        return iter(self._tests)

    def __repr__(self):
        return "<%s tests=generator (%s)>" % (
            _strclass(self.__class__), id(self))

    def __hash__(self):
        return object.__hash__(self)

    __str__ = __repr__

    def addTest(self, test):
        # Added tests go into the precache, ahead of generator output.
        self._precache.append(test)

    # added to bypass run changes in 2.7's unittest
    def run(self, result):
        for test in self._tests:
            if result.shouldStop:
                break
            test(result)
        return result

    def __nonzero__(self):
        # Truth test: does the suite contain at least one test? May
        # consume one item from the generator; it is stashed in the
        # precache so iteration still sees it.
        log.debug("tests in %s?", id(self))
        if self._precache:
            return True
        if self.test_generator is None:
            return False
        try:
            test = self.test_generator.next()
            if test is not None:
                self._precache.append(test)
                return True
        except StopIteration:
            pass
        return False

    def _get_tests(self):
        # Yield precached tests first, then drain the generator if any.
        log.debug("precache is %s", self._precache)
        for test in self._precache:
            yield test
        if self.test_generator is None:
            return
        for test in self.test_generator:
            yield test

    def _set_tests(self, tests):
        self._precache = []
        is_suite = isinstance(tests, unittest.TestSuite)
        if callable(tests) and not is_suite:
            # A callable is expected to return an iterator of tests.
            self.test_generator = tests()
        elif is_suite:
            # Suites need special treatment: they must be called like
            # tests for their setup/teardown to run (if any)
            self.addTests([tests])
            self.test_generator = None
        else:
            self.addTests(tests)
            self.test_generator = None

    _tests = property(_get_tests, _set_tests, None,
                      "Access the tests in this suite. Access is through a "
                      "generator, so iteration may not be repeatable.")
+
+
class ContextSuite(LazySuite):
    """A suite with context.

    A ContextSuite executes fixtures (setup and teardown functions or
    methods) for the context containing its tests.

    The context may be explicitly passed. If it is not, a context (or
    nested set of contexts) will be constructed by examining the tests
    in the suite.
    """
    failureException = unittest.TestCase.failureException
    # Fixture state flags; each suite runs its fixtures at most once.
    was_setup = False
    was_torndown = False
    # Accepted fixture method/function names, checked in order by
    # try_run for class, module and package contexts respectively.
    classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
                     'setUpClass', 'setUpAll')
    classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
                     'teardownAll', 'tearDownClass', 'tearDownAll')
    moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
                   'setUp')
    moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
                      'teardown', 'tearDown')
    packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
    packageTeardown = ('teardown_package', 'teardownPackage',
                       'tearDownPackage')

    def __init__(self, tests=(), context=None, factory=None,
                 config=None, resultProxy=None, can_split=True):
        """Initialize the suite.

        context is the object (class, module or package) whose fixtures
        wrap the tests; factory is the ContextSuiteFactory that tracks
        ancestor setup/teardown; resultProxy wraps results for plugins.
        """
        log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
        self.context = context
        self.factory = factory
        if config is None:
            config = Config()
        self.config = config
        self.resultProxy = resultProxy
        self.has_run = False
        self.can_split = can_split
        # Set to 'setup' or 'teardown' when a fixture raises, so id()
        # can report where the error occurred.
        self.error_context = None
        LazySuite.__init__(self, tests)

    def __repr__(self):
        return "<%s context=%s>" % (
            _strclass(self.__class__),
            getattr(self.context, '__name__', self.context))
    __str__ = __repr__

    def id(self):
        if self.error_context:
            return '%s:%s' % (repr(self), self.error_context)
        else:
            return repr(self)

    def __hash__(self):
        return object.__hash__(self)

    # 2.3 compat -- force 2.4 call sequence
    def __call__(self, *arg, **kw):
        return self.run(*arg, **kw)

    def exc_info(self):
        """Hook for replacing error tuple output
        """
        return sys.exc_info()

    def _exc_info(self):
        """Bottleneck to fix up IronPython string exceptions
        """
        e = self.exc_info()
        if sys.platform == 'cli':
            if isinstance(e[0], StringException):
                # IronPython throws these StringExceptions, but
                # traceback checks type(etype) == str. Make a real
                # string here.
                e = (str(e[0]), e[1], e[2])

        return e

    def run(self, result):
        """Run tests in suite inside of suite fixtures.
        """
        # proxy the result for myself
        log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
        if self.resultProxy:
            result, orig = self.resultProxy(result, self), result
        else:
            result, orig = result, result
        try:
            self.setUp()
        except KeyboardInterrupt:
            raise
        except:
            # A failed setup is reported as an error on the suite
            # itself; the tests are not run.
            self.error_context = 'setup'
            result.addError(self, self._exc_info())
            return
        try:
            for test in self._tests:
                if result.shouldStop:
                    log.debug("stopping")
                    break
                # each nose.case.Test will create its own result proxy
                # so the cases need the original result, to avoid proxy
                # chains
                test(orig)
        finally:
            self.has_run = True
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                self.error_context = 'teardown'
                result.addError(self, self._exc_info())

    def hasFixtures(self, ctx_callback=None):
        """Return True if my context (or any of its ancestors) defines
        any fixture method; ctx_callback, when given, decides per
        context based on (context, has_fixture)."""
        context = self.context
        if context is None:
            return False
        if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
            return True
        # My context doesn't have any, but its ancestors might
        factory = self.factory
        if factory:
            ancestors = factory.context.get(self, [])
            for ancestor in ancestors:
                if self.implementsAnyFixture(
                    ancestor, ctx_callback=ctx_callback):
                    return True
        return False

    def implementsAnyFixture(self, context, ctx_callback):
        """Check whether context defines any of the recognized fixture
        names for its kind (class, module, or package)."""
        if isclass(context):
            names = self.classSetup + self.classTeardown
        else:
            names = self.moduleSetup + self.moduleTeardown
            if hasattr(context, '__path__'):
                names += self.packageSetup + self.packageTeardown
        # If my context has any fixture attribute, I have fixtures
        fixt = False
        for m in names:
            if hasattr(context, m):
                fixt = True
                break
        if ctx_callback is None:
            return fixt
        return ctx_callback(context, fixt)

    def setUp(self):
        """Run setup fixtures for my context and any not-yet-setup
        ancestors, outermost first. No-op if the suite is empty or
        setup already ran."""
        log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
        if not self:
            # I have no tests
            log.debug("suite %s has no tests", id(self))
            return
        if self.was_setup:
            log.debug("suite %s already set up", id(self))
            return
        context = self.context
        if context is None:
            return
        # before running my own context's setup, I need to
        # ask the factory if my context's contexts' setups have been run
        factory = self.factory
        if factory:
            # get a copy, since we'll be destroying it as we go
            ancestors = factory.context.get(self, [])[:]
            while ancestors:
                ancestor = ancestors.pop()
                log.debug("ancestor %s may need setup", ancestor)
                if ancestor in factory.was_setup:
                    continue
                log.debug("ancestor %s does need setup", ancestor)
                self.setupContext(ancestor)
            if not context in factory.was_setup:
                self.setupContext(context)
        else:
            self.setupContext(context)
        self.was_setup = True
        log.debug("completed suite setup")

    def setupContext(self, context):
        """Run the setup fixture(s) of a single context, recording in
        the factory which suite performed the setup."""
        self.config.plugins.startContext(context)
        log.debug("%s setup context %s", self, context)
        if self.factory:
            if context in self.factory.was_setup:
                return
            # note that I ran the setup for this context, so that I'll run
            # the teardown in my teardown
            self.factory.was_setup[context] = self
        if isclass(context):
            names = self.classSetup
        else:
            names = self.moduleSetup
            if hasattr(context, '__path__'):
                names = self.packageSetup + names
        try_run(context, names)

    def shortDescription(self):
        if self.context is None:
            return "test suite"
        return "test suite for %s" % self.context

    def tearDown(self):
        """Run teardown fixtures for my context and any ancestors whose
        setup I performed; only runs after a successful setUp and at
        most once."""
        log.debug('context teardown')
        if not self.was_setup or self.was_torndown:
            log.debug(
                "No reason to teardown (was_setup? %s was_torndown? %s)"
                % (self.was_setup, self.was_torndown))
            return
        self.was_torndown = True
        context = self.context
        if context is None:
            log.debug("No context to tear down")
            return

        # for each ancestor... if the ancestor was setup
        # and I did the setup, I can do teardown
        factory = self.factory
        if factory:
            ancestors = factory.context.get(self, []) + [context]
            for ancestor in ancestors:
                log.debug('ancestor %s may need teardown', ancestor)
                if not ancestor in factory.was_setup:
                    log.debug('ancestor %s was not setup', ancestor)
                    continue
                if ancestor in factory.was_torndown:
                    log.debug('ancestor %s already torn down', ancestor)
                    continue
                setup = factory.was_setup[ancestor]
                log.debug("%s setup ancestor %s", setup, ancestor)
                if setup is self:
                    self.teardownContext(ancestor)
        else:
            self.teardownContext(context)

    def teardownContext(self, context):
        """Run the teardown fixture(s) of a single context, recording
        the teardown in the factory so it is not repeated."""
        log.debug("%s teardown context %s", self, context)
        if self.factory:
            if context in self.factory.was_torndown:
                return
            self.factory.was_torndown[context] = self
        if isclass(context):
            names = self.classTeardown
        else:
            names = self.moduleTeardown
            if hasattr(context, '__path__'):
                names = self.packageTeardown + names
        try_run(context, names)
        self.config.plugins.stopContext(context)

    # FIXME the wrapping has to move to the factory?
    def _get_wrapped_tests(self):
        # Wrap raw test cases in nose.case.Test; already-wrapped tests
        # and sub-suites pass through unchanged.
        for test in self._get_tests():
            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
                yield test
            else:
                yield Test(test,
                           config=self.config,
                           resultProxy=self.resultProxy)

    _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
                      "Access the tests in this suite. Tests are returned "
                      "inside of a context wrapper.")
+
+
+class ContextSuiteFactory(object):
+    """Factory for ContextSuites. Called with a collection of tests,
+    the factory decides on a hierarchy of contexts by introspecting
+    the collection or the tests themselves to find the objects
+    containing the test objects. It always returns one suite, but that
+    suite may consist of a hierarchy of nested suites.
+    """
+    suiteClass = ContextSuite
+    def __init__(self, config=None, suiteClass=None, resultProxy=_def):
+        if config is None:
+            config = Config()
+        self.config = config
+        if suiteClass is not None:
+            self.suiteClass = suiteClass
+        # Using a singleton to represent default instead of None allows
+        # passing resultProxy=None to turn proxying off.
+        if resultProxy is _def:
+            resultProxy = ResultProxyFactory(config=config)
+        self.resultProxy = resultProxy
+        self.suites = {}
+        self.context = {}
+        self.was_setup = {}
+        self.was_torndown = {}
+
+    def __call__(self, tests, **kw):
+        """Return ``ContextSuite`` for tests. ``tests`` may either
+        be a callable (in which case the resulting ContextSuite will
+        have no parent context and be evaluated lazily) or an
+        iterable. In that case the tests will wrapped in
+        nose.case.Test, be examined and the context of each found and a
+        suite of suites returned, organized into a stack with the
+        outermost suites belonging to the outermost contexts.
+        """
+        log.debug("Create suite for %s", tests)
+        context = kw.pop('context', getattr(tests, 'context', None))
+        log.debug("tests %s context %s", tests, context)
+        if context is None:
+            tests = self.wrapTests(tests)
+            try:
+                context = self.findContext(tests)
+            except MixedContextError:
+                return self.makeSuite(self.mixedSuites(tests), None, **kw)
+        return self.makeSuite(tests, context, **kw)
+
    def ancestry(self, context):
        """Return the ancestry of the context (that is, all of the
        packages and modules containing the context), in order of
        descent with the outermost ancestor last.
        This method is a generator.
        """
        log.debug("get ancestry %s", context)
        if context is None:
            return
        # Methods include reference to module they are defined in, we
        # don't want that, instead want the module the class is in now
        # (classes are re-ancestored elsewhere).
        if hasattr(context, 'im_class'):
            # Python 2 bound/unbound method: use the owning class.
            context = context.im_class
        elif hasattr(context, '__self__'):
            # Python 3 bound method: use the instance's class.
            context = context.__self__.__class__
        # Classes and functions carry __module__ (full dotted path of the
        # containing module); a module itself carries __name__, whose last
        # segment names the module, so only the leading segments are
        # ancestors.
        if hasattr(context, '__module__'):
            ancestors = context.__module__.split('.')
        elif hasattr(context, '__name__'):
            ancestors = context.__name__.split('.')[:-1]
        else:
            raise TypeError("%s has no ancestors?" % context)
        # Trim one trailing segment per iteration: innermost ancestor is
        # yielded first, outermost package last.
        while ancestors:
            log.debug(" %s ancestors %s", context, ancestors)
            yield resolve_name('.'.join(ancestors))
            ancestors.pop()
+
+    def findContext(self, tests):
+        if callable(tests) or isinstance(tests, unittest.TestSuite):
+            return None
+        context = None
+        for test in tests:
+            # Don't look at suites for contexts, only tests
+            ctx = getattr(test, 'context', None)
+            if ctx is None:
+                continue
+            if context is None:
+                context = ctx
+            elif context != ctx:
+                raise MixedContextError(
+                    "Tests with different contexts in same suite! %s != %s"
+                    % (context, ctx))
+        return context
+
    def makeSuite(self, tests, context, **kw):
        """Create a suite (``self.suiteClass``) for ``tests`` bound to
        ``context`` and record it in the factory's bookkeeping: the suite
        is filed in ``self.suites`` under the context itself and under
        every ancestor of the context, and the reverse
        suite -> [context, ancestors...] mapping is kept in
        ``self.context``.
        """
        suite = self.suiteClass(
            tests, context=context, config=self.config, factory=self,
            resultProxy=self.resultProxy, **kw)
        if context is not None:
            self.suites.setdefault(context, []).append(suite)
            self.context.setdefault(suite, []).append(context)
            log.debug("suite %s has context %s", suite,
                      getattr(context, '__name__', None))
            # Also index the suite under each containing package/module.
            for ancestor in self.ancestry(context):
                self.suites.setdefault(ancestor, []).append(suite)
                self.context[suite].append(ancestor)
                log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
        return suite
+
    def mixedSuites(self, tests):
        """The complex case where there are tests that don't all share
        the same context. Groups tests into suites with common ancestors,
        according to the following (essentially tail-recursive) procedure:

        Starting with the context of the first test, if it is not
        None, look for tests in the remaining tests that share that
        ancestor. If any are found, group into a suite with that
        ancestor as the context, and replace the current suite with
        that suite. Continue this process for each ancestor of the
        first test, until all ancestors have been processed. At this
        point if any tests remain, recurse with those tests as the
        input, returning a list of the common suite (which may be the
        suite or test we started with, if no common tests were found)
        plus the results of recursion.
        """
        if not tests:
            return []
        head = tests.pop(0)
        if not tests:
            return [head] # short circuit when none are left to combine
        suite = head # the common ancestry suite, so far
        tail = tests[:]
        context = getattr(head, 'context', None)
        if context is not None:
            # Consider the head's own context first, then progressively
            # more general ancestors.
            ancestors = [context] + [a for a in self.ancestry(context)]
            for ancestor in ancestors:
                common = [suite] # tests with ancestor in common, so far
                remain = [] # tests that remain to be processed
                for test in tail:
                    found_common = False
                    test_ctx = getattr(test, 'context', None)
                    if test_ctx is None:
                        # A test without a context can never share one.
                        remain.append(test)
                        continue
                    if test_ctx is ancestor:
                        common.append(test)
                        continue
                    # Identity comparison against the test's own ancestry;
                    # contexts are module/class objects.
                    for test_ancestor in self.ancestry(test_ctx):
                        if test_ancestor is ancestor:
                            common.append(test)
                            found_common = True
                            break
                    if not found_common:
                        remain.append(test)
                if common:
                    # Everything grouped so far becomes one suite keyed to
                    # this ancestor; it is the head suite for the next pass.
                    suite = self.makeSuite(common, ancestor)
                tail = self.mixedSuites(remain)
        return [suite] + tail
+
    def wrapTests(self, tests):
        """Wrap raw tests for inclusion in a suite.

        Callables and ``unittest.TestSuite`` instances are returned
        unchanged (a callable is evaluated lazily elsewhere). Otherwise
        each item is wrapped in ``Test``; items that are already ``Test``
        instances or suites pass through, and ``ContextList`` groups are
        turned into suites bound to the list's hinted context.
        """
        log.debug("wrap %s", tests)
        if callable(tests) or isinstance(tests, unittest.TestSuite):
            log.debug("I won't wrap")
            return tests
        wrapped = []
        for test in tests:
            log.debug("wrapping %s", test)
            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
                # Already wrapped, or a suite: pass through unchanged.
                wrapped.append(test)
            elif isinstance(test, ContextList):
                # A hinted group: build a suite using the hinted context.
                wrapped.append(self.makeSuite(test, context=test.context))
            else:
                wrapped.append(
                    Test(test, config=self.config, resultProxy=self.resultProxy)
                    )
        return wrapped
+
+
class ContextList(object):
    """Not quite a suite -- a group of tests in a context. This is used
    to hint the ContextSuiteFactory about what context the tests
    belong to, in cases where it may be ambiguous or missing.
    """
    def __init__(self, tests, context=None):
        # Stored as given; iteration simply delegates to the tests.
        self.context = context
        self.tests = tests

    def __iter__(self):
        for test in self.tests:
            yield test
+
+
class FinalizingSuiteWrapper(unittest.TestSuite):
    """Wraps suite and calls final function after suite has
    executed. Used to call final functions in cases (like running in
    the standard test runner) where test running is not under nose's
    control.
    """
    def __init__(self, suite, finalize):
        # Does not call unittest.TestSuite.__init__; this wrapper only
        # delegates to the wrapped suite.
        self.suite = suite
        self.finalize = finalize

    def run(self, *arg, **kw):
        """Run the wrapped suite, always invoking ``finalize`` after."""
        try:
            return self.suite(*arg, **kw)
        finally:
            self.finalize(*arg, **kw)

    def __call__(self, *arg, **kw):
        return self.run(*arg, **kw)

    # 2.7 compat
    def __iter__(self):
        return iter(self.suite)
+
+
# backwards compat -- sort of
class TestDir:
    """Unusable placeholder kept so that old imports of
    ``nose.suite.TestDir`` still succeed; instantiating it always fails.
    """
    def __init__(self, *arg, **kw):
        raise NotImplementedError(
            "TestDir is not usable with nose 0.10. The class is present "
            "in nose.suite for backwards compatibility purposes but it "
            "may not be used.")
+
+
class TestModule:
    """Unusable placeholder kept so that old imports of
    ``nose.suite.TestModule`` still succeed; instantiating it always fails.
    """
    def __init__(self, *arg, **kw):
        raise NotImplementedError(
            "TestModule is not usable with nose 0.10. The class is present "
            "in nose.suite for backwards compatibility purposes but it "
            "may not be used.")
diff --git a/nose/tools.py b/nose/tools.py
new file mode 100644 (file)
index 0000000..2fb3e75
--- /dev/null
@@ -0,0 +1,194 @@
+"""
+Tools for testing
+-----------------
+
+nose.tools provides a few convenience functions to make writing tests
+easier. You don't have to use them; nothing in the rest of nose depends
+on any of these methods.
+"""
+import re
+import time
+import unittest
+
+
+__all__ = ['ok_', 'eq_', 'make_decorator', 'raises', 'set_trace', 'timed',
+            'with_setup', 'TimeExpired', 'istest', 'nottest']
+
+
class TimeExpired(AssertionError):
    """Failure raised when a test exceeds its time limit (see ``timed``)."""
    pass
+
+
def ok_(expr, msg=None):
    """Assert that ``expr`` is true, failing with ``msg`` when given.
    Shorthand for a bare assert -- saves 3 whole characters!
    """
    assert expr, msg
+
+
def eq_(a, b, msg=None):
    """Assert that ``a == b``, using ``msg`` (or ``"%r != %r" % (a, b)``
    when ``msg`` is not given) as the failure message.
    """
    assert a == b, msg or "%r != %r" % (a, b)
+
+
def make_decorator(func):
    """
    Wraps a test decorator so as to properly replicate metadata
    of the decorated function, including nose's additional stuff
    (namely, setup and teardown).
    """
    def decorate(newfunc):
        # The original name may live in compat_func_name on interpreters
        # where __name__ could not be assigned (see the except below).
        if hasattr(func, 'compat_func_name'):
            name = func.compat_func_name
        else:
            name = func.__name__
        newfunc.__dict__ = func.__dict__
        newfunc.__doc__ = func.__doc__
        newfunc.__module__ = func.__module__
        # Preserve the original definition line; nose.util.func_lineno
        # reads compat_co_firstlineno in preference to the code object.
        if not hasattr(newfunc, 'compat_co_firstlineno'):
            newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
        try:
            newfunc.__name__ = name
        except TypeError:
            # can't set func name in 2.3
            newfunc.compat_func_name = name
        return newfunc
    return decorate
+
+
def raises(*exceptions):
    """Decorator: the test passes only if it raises one of ``exceptions``.

    Example use::

      @raises(TypeError, ValueError)
      def test_raises_type_error():
          raise TypeError("This test passes")

      @raises(Exception)
      def test_that_fails_by_passing():
          pass

    If you want to test many assertions about exceptions in a single test,
    you may want to use `assert_raises` instead.
    """
    valid = ' or '.join([e.__name__ for e in exceptions])

    def decorate(func):
        name = func.__name__

        def newfunc(*arg, **kw):
            raised_expected = False
            try:
                func(*arg, **kw)
            except exceptions:
                raised_expected = True
            # Any other exception propagates to the caller unchanged.
            if not raised_expected:
                message = "%s() did not raise %s" % (name, valid)
                raise AssertionError(message)
        return make_decorator(func)(newfunc)

    return decorate
+
+
def set_trace():
    """Call pdb.set_trace in the calling frame, first restoring
    sys.stdout to the real output stream. Note that sys.stdout is NOT
    reset to whatever it was before the call once pdb is done!
    """
    import pdb
    import sys
    # The previous stdout is saved but never restored (see note above).
    stdout = sys.stdout
    sys.stdout = sys.__stdout__
    # Debug the caller's frame, not this helper's.
    pdb.Pdb().set_trace(sys._getframe().f_back)
+    
+
def timed(limit):
    """Decorator: the test fails with ``TimeExpired`` unless it finishes
    within ``limit`` seconds.

    Example use::

      @timed(.1)
      def test_that_fails():
          time.sleep(.2)
    """
    def decorate(func):
        def newfunc(*arg, **kw):
            started = time.time()
            func(*arg, **kw)
            elapsed = time.time() - started
            if elapsed > limit:
                raise TimeExpired("Time limit (%s) exceeded" % limit)
        return make_decorator(func)(newfunc)
    return decorate
+
+
def with_setup(setup=None, teardown=None):
    """Decorator to add setup and/or teardown methods to a test function::

      @with_setup(setup, teardown)
      def test_something():
          " ... "

    Note that `with_setup` is useful *only* for test functions, not for test
    methods or inside of TestCase subclasses.
    """
    def decorate(func, setup=setup, teardown=teardown):
        if setup:
            if hasattr(func, 'setup'):
                previous_setup = func.setup
                def chained_setup():
                    # The newly added setup runs before any existing one.
                    setup()
                    previous_setup()
                func.setup = chained_setup
            else:
                func.setup = setup
        if teardown:
            if hasattr(func, 'teardown'):
                previous_teardown = func.teardown
                def chained_teardown():
                    # The existing teardown runs before the new one.
                    previous_teardown()
                    teardown()
                func.teardown = chained_teardown
            else:
                func.teardown = teardown
        return func
    return decorate
+
+
def istest(func):
    """Decorator marking a function or method as a test by setting its
    ``__test__`` attribute to True.
    """
    setattr(func, '__test__', True)
    return func
+
+
def nottest(func):
    """Decorator marking a function or method as *not* a test by setting
    its ``__test__`` attribute to False.
    """
    setattr(func, '__test__', False)
    return func
+
#
# Expose assert* from unittest.TestCase
# - give them pep8 style names
#
caps = re.compile('([A-Z])')

def pep8(name):
    # camelCase -> pep8: prefix each capital with '_' and lowercase it,
    # e.g. 'assertEqual' -> 'assert_equal'.
    return caps.sub(lambda m: '_' + m.groups()[0].lower(), name)

class Dummy(unittest.TestCase):
    # Throwaway TestCase used only to borrow its bound assert* methods.
    def nop():
        pass
_t = Dummy('nop')

# Re-export every camelCase assert* method (those containing no
# underscore) under a pep8-style name and advertise it in __all__.
for at in [ at for at in dir(_t)
            if at.startswith('assert') and not '_' in at ]:
    pepd = pep8(at)
    vars()[pepd] = getattr(_t, at)
    __all__.append(pepd)

# Clean up the module namespace; the exported bound methods keep the
# Dummy instance alive.
del Dummy
del _t
del pep8
diff --git a/nose/twistedtools.py b/nose/twistedtools.py
new file mode 100644 (file)
index 0000000..3720610
--- /dev/null
@@ -0,0 +1,168 @@
+"""
+Twisted integration
+-------------------
+
+This module provides a very simple way to integrate your tests with the
+Twisted_ event loop.
+
+You must import this module *before* importing anything from Twisted itself!
+
+Example::
+
+  from nose.twistedtools import reactor, deferred
+  
+  @deferred()
+  def test_resolve():
+      return reactor.resolve("www.python.org")
+
+Or, more realistically::
+
+  @deferred(timeout=5.0)
+  def test_resolve():
+      d = reactor.resolve("www.python.org")
+      def check_ip(ip):
+          assert ip == "67.15.36.43"
+      d.addCallback(check_ip)
+      return d
+
+.. _Twisted: http://twistedmatrix.com/trac/
+"""
+
+import sys
+from Queue import Queue, Empty
+from nose.tools import make_decorator, TimeExpired
+
+__all__ = [
+    'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
+    'stop_reactor'
+]
+
+_twisted_thread = None
+
def threaded_reactor():
    """
    Start the Twisted reactor in a separate daemon thread, unless it is
    already running. Returns ``(reactor, thread)``, or ``(None, None)``
    when Twisted is not installed.
    """
    global _twisted_thread
    try:
        from twisted.internet import reactor
    except ImportError:
        # Twisted is unavailable; signal that to the caller.
        return None, None
    if not _twisted_thread:
        from twisted.python import threadable
        from threading import Thread

        def _run_reactor():
            reactor.run(installSignalHandlers=False)

        _twisted_thread = Thread(target=_run_reactor)
        _twisted_thread.setDaemon(True)
        _twisted_thread.start()
    return reactor, _twisted_thread
+
# Export global reactor variable, as Twisted does
# (both are None when Twisted is not installed; see threaded_reactor).
reactor, reactor_thread = threaded_reactor()
+
+
def stop_reactor():
    """Stop the reactor and join the reactor thread until it stops.
    Call this function in teardown at the module or package level to
    reset the twisted system after your tests. You *must* do this if
    you mix tests using these tools and tests using twisted.trial.
    """
    global _twisted_thread
    reactor.stop()
    reactor_thread.join()
    # Cancel any work still scheduled on the (now stopped) reactor.
    for p in reactor.getDelayedCalls():
        if p.active():
            p.cancel()
    # Allow threaded_reactor() to spin up a fresh thread next time.
    _twisted_thread = None
+
+
def deferred(timeout=None):
    """
    By wrapping a test function with this decorator, you can return a
    twisted Deferred and the test will wait for the deferred to be triggered.
    The whole test function will run inside the Twisted event loop.

    The optional timeout parameter specifies the maximum duration of the test.
    The difference with timed() is that timed() will still wait for the test
    to end, while deferred() will stop the test when its timeout has expired.
    The latter is more desirable when dealing with network tests, because
    the result may actually never arrive.

    If the callback is triggered, the test has passed.
    If the errback is triggered or the timeout expires, the test has failed.

    Example::

        @deferred(timeout=5.0)
        def test_resolve():
            return reactor.resolve("www.python.org")

    Attention! If you combine this decorator with other decorators (like
    "raises"), deferred() must be called *first*!

    In other words, this is good::

        @raises(DNSLookupError)
        @deferred()
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")

    and this is bad::

        @deferred()
        @raises(DNSLookupError)
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")
    """
    reactor, reactor_thread = threaded_reactor()
    if reactor is None:
        raise ImportError("twisted is not available or could not be imported")
    # Check for common syntax mistake
    # (otherwise, tests can be silently ignored
    # if one writes "@deferred" instead of "@deferred()")
    try:
        timeout is None or timeout + 0
    except TypeError:
        raise TypeError("'timeout' argument must be a number or None")

    def decorate(func):
        def wrapper(*args, **kargs):
            # The test body runs in the reactor thread; the outcome (None
            # on success, exc_info on failure) travels back on this queue.
            q = Queue()
            def callback(value):
                q.put(None)
            def errback(failure):
                # Retrieve and save full exception info
                try:
                    failure.raiseException()
                except:
                    q.put(sys.exc_info())
            def g():
                try:
                    d = func(*args, **kargs)
                    try:
                        d.addCallbacks(callback, errback)
                    # Check for a common mistake and display a nice error
                    # message
                    except AttributeError:
                        raise TypeError("you must return a twisted Deferred "
                                        "from your test case!")
                # Catch exceptions raised in the test body (from the
                # Twisted thread)
                except:
                    q.put(sys.exc_info())
            reactor.callFromThread(g)
            # Block the calling thread until the deferred fires or the
            # timeout elapses.
            try:
                error = q.get(timeout=timeout)
            except Empty:
                raise TimeExpired("timeout expired before end of test (%f s.)"
                                  % timeout)
            # Re-raise all exceptions
            if error is not None:
                exc_type, exc_value, tb = error
                raise exc_type, exc_value, tb
        wrapper = make_decorator(func)(wrapper)
        return wrapper
    return decorate
+
diff --git a/nose/usage.txt b/nose/usage.txt
new file mode 100644 (file)
index 0000000..86caa2d
--- /dev/null
@@ -0,0 +1,110 @@
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression
+(by default: `(?:^|[\b_\.-])[Tt]est`) will be collected as a test (or
+source for collection of tests). In addition, all other packages
+found in the working directory will be examined for python source files
+or directories that match testMatch. Package discovery descends all
+the way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+Selecting Tests
+---------------
+
+To specify which tests to run, pass test names on the command line:
+
+  %prog only_test_this.py
+  
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+  %prog test.module
+  %prog another.test:TestCase.test_method
+  %prog a.test:TestCase
+  %prog /path/to/test/file.py:test_function
+  
+You may also change the working directory where nose looks for tests
+by using the -w switch:
+
+  %prog -w /path/to/tests
+
+Note, however, that support for multiple -w arguments is now deprecated
+and will be removed in a future release. As of nose 0.10, you can get
+the same behavior by specifying the target directories *without*
+the -w switch:
+
+  %prog /path/to/tests /another/path/to/tests
+
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for
+the additional features (error classes, and plugin-supplied
+features such as output capture and assert introspection) detailed
+in the options below.
+
+Configuration
+-------------
+
+In addition to passing command-line options, you may also put
+configuration options in your project's *setup.cfg* file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+.ini-style config files, you put your nosetests configuration in a
+``[nosetests]`` section. Options are the same as on the command line,
+with the -- prefix removed. For options that are simple switches, you
+must supply a value:
+
+  [nosetests]
+  verbosity=3
+  with-doctest=1
+
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the ``-c`` option.
+
+Using Plugins
+-------------
+
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is installed,
+run:
+
+  nosetests --plugins
+
+You can add -v or -vv to that command to show more information
+about each plugin.
+
+If you are running nose.main() or nose.run() from a script, you
+can specify a list of plugins to use by passing a list of plugins
+with the plugins keyword argument.
+
+0.9 plugins
+-----------
+
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it's likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+0.10 and 0.11 plugins
+---------------------
+
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
diff --git a/nose/util.py b/nose/util.py
new file mode 100644 (file)
index 0000000..34920fb
--- /dev/null
@@ -0,0 +1,663 @@
+"""Utility functions and classes used by nose internally.
+"""
+import inspect
+import itertools
+import logging
+import os
+import re
+import sys
+import types
+import unittest
+from nose.pyversion import ClassType, TypeType
+
+try:
+    from compiler.consts import CO_GENERATOR
+except ImportError:
+    # IronPython doesn't have a compiler module
+    CO_GENERATOR=0x20
+    
+log = logging.getLogger('nose')
+
+ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
+class_types = (ClassType, TypeType)
+skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)"
+
+try:
+    set()
+    set = set # make from nose.util import set happy
+except NameError:
+    try:
+        from sets import Set as set
+    except ImportError:
+        pass
+
+
def ls_tree(dir_path="",
            skip_pattern=skip_pattern,
            indent="|-- ", branch_indent="|   ",
            last_indent="`-- ", last_branch_indent="    "):
    """Return an ascii-art tree listing of ``dir_path`` (the current
    working directory when empty), skipping entries that match
    ``skip_pattern``.
    """
    # TODO: empty directories look like non-directory files
    tree_lines = _ls_tree_lines(dir_path, skip_pattern,
                                indent, branch_indent,
                                last_indent, last_branch_indent)
    return "\n".join(tree_lines)
+
+
def _ls_tree_lines(dir_path, skip_pattern,
                   indent, branch_indent, last_indent, last_branch_indent):
    """Recursively yield the lines of the ascii tree for ``dir_path``.

    ``indent``/``branch_indent`` prefix non-final entries and their
    subtrees; the ``last_*`` variants prefix the final entry. This
    function is a generator.
    """
    if dir_path == "":
        dir_path = os.getcwd()

    # NOTE(review): unused local, kept as-is.
    lines = []

    # Partition the (sorted, unskipped) entries into files and dirs.
    names = os.listdir(dir_path)
    names.sort()
    dirs, nondirs = [], []
    for name in names:
        if re.match(skip_pattern, name):
            continue
        if os.path.isdir(os.path.join(dir_path, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    # list non-directories first
    entries = list(itertools.chain([(name, False) for name in nondirs],
                                   [(name, True) for name in dirs]))
    def ls_entry(name, is_dir, ind, branch_ind):
        # Yield the entry's own line and, for directories that are not
        # symlinks, its recursively rendered subtree (symlinked
        # directories are not descended into).
        if not is_dir:
            yield ind + name
        else:
            path = os.path.join(dir_path, name)
            if not os.path.islink(path):
                yield ind + name
                subtree = _ls_tree_lines(path, skip_pattern,
                                         indent, branch_indent,
                                         last_indent, last_branch_indent)
                for x in subtree:
                    yield branch_ind + x
    # All but the last entry use the branch connectors; the last entry
    # uses the closing connectors.
    for name, is_dir in entries[:-1]:
        for line in ls_entry(name, is_dir, indent, branch_indent):
            yield line
    if entries:
        name, is_dir = entries[-1]
        for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
            yield line
+
+
def absdir(path):
    """Return absolute, normalized path to directory, if it exists; None
    otherwise. A path that is already absolute is returned unchanged.
    """
    if not os.path.isabs(path):
        joined = os.path.join(os.getcwd(), path)
        path = os.path.normpath(os.path.abspath(joined))
    if path is not None and os.path.isdir(path):
        return path
    return None
+
+
def absfile(path, where=None):
    """Return absolute, normalized path to file (optionally in directory
    where), or None if the file can't be found either in where or the current
    working directory.
    """
    orig = path
    if where is None:
        where = os.getcwd()
    # ``where`` may be a list/tuple of candidate directories; the first
    # directory yielding a match wins.
    if isinstance(where, list) or isinstance(where, tuple):
        for maybe_path in where:
            maybe_abs = absfile(path, maybe_path)
            if maybe_abs is not None:
                return maybe_abs
        return None
    if not os.path.isabs(path):
        path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
    if path is None or not os.path.exists(path):
        if where != os.getcwd():
            # try the cwd instead
            path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
                                                                 orig)))
    if path is None or not os.path.exists(path):
        return None
    if os.path.isdir(path):
        # might want an __init__.py from package
        init = os.path.join(path,'__init__.py')
        if os.path.isfile(init):
            return init
    elif os.path.isfile(path):
        return path
    # A directory without an __init__.py falls through to None.
    return None
+
+
def anyp(predicate, iterable):
    """Return True if ``predicate`` is true for any item of ``iterable``,
    False otherwise (short-circuits on the first match).
    """
    return any(predicate(item) for item in iterable)
+
+
def file_like(name):
    """A name is file-like if it is a path that exists, or it has a
    directory part, or it ends in .py, or it isn't a legal python
    identifier.
    """
    if os.path.exists(name):
        return True
    directory = os.path.dirname(name)
    if directory:
        # Truthy directory part is returned as-is, like the original
        # or-chain.
        return directory
    if name.endswith('.py'):
        return True
    return not ident_re.match(os.path.splitext(name)[0])
+
+
def func_lineno(func):
    """Get the line number of a function. First looks for
    compat_co_firstlineno, then func_code.co_firstlineno; returns -1
    when neither is available.
    """
    try:
        return func.compat_co_firstlineno
    except AttributeError:
        pass
    try:
        return func.func_code.co_firstlineno
    except AttributeError:
        return -1
+
+
def isclass(obj):
    """Is obj a class? Inspect's isclass is too liberal and returns True
    for objects that can't be subclasses of anything.
    """
    kind = type(obj)
    if kind in class_types:
        return True
    return issubclass(kind, type)
+
+
def isgenerator(func):
    """Return True if ``func``'s code object has the generator flag set,
    False when the flag is clear or no code object is available.
    """
    try:
        flags = func.func_code.co_flags
    except AttributeError:
        return False
    return (flags & CO_GENERATOR) != 0
# backwards compat (issue #64)
is_generator = isgenerator
+
+
def ispackage(path):
    """
    Is this path a package directory?

    >>> ispackage('nose')
    True
    >>> ispackage('unit_tests')
    False
    >>> ispackage('nose/plugins')
    True
    >>> ispackage('nose/loader.py')
    False
    """
    if not os.path.isdir(path):
        return False
    # at least the end of the path must be a legal python identifier
    # and __init__.py[co] must exist
    end = os.path.basename(path)
    if not ident_re.match(end):
        return False
    for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
        if os.path.isfile(os.path.join(path, init)):
            return True
    # Jython compiles __init__ to an __init__$py.class file.
    if sys.platform.startswith('java') and \
            os.path.isfile(os.path.join(path, '__init__$py.class')):
        return True
    return False
+
+
def isproperty(obj):
    """
    Is this a property?

    >>> class Foo:
    ...     def got(self):
    ...         return 2
    ...     def get(self):
    ...         return 1
    ...     get = property(get)

    >>> isproperty(Foo.got)
    False
    >>> isproperty(Foo.get)
    True
    """
    # Exact type check (not isinstance), matching only ``property`` itself.
    return type(obj) is property
+
+
def getfilename(package, relativeTo=None):
    """Find the python source file for a package, relative to a
    particular directory (defaults to current working directory if not
    given). Returns None when no matching file exists.
    """
    if relativeTo is None:
        relativeTo = os.getcwd()
    base = os.path.join(relativeTo, os.sep.join(package.split('.')))
    # A package directory's __init__.py wins over a same-named module.
    for candidate in (base + '/__init__.py', base + '.py'):
        if os.path.exists(candidate):
            return candidate
    return None
+
+
def getpackage(filename):
    """
    Find the full dotted package name for a given python source file
    name. Returns None if the file is not a python source file.

    >>> getpackage('foo.py')
    'foo'
    >>> getpackage('biff/baf.py')
    'baf'
    >>> getpackage('nose/util.py')
    'nose.util'

    Works for directories too.

    >>> getpackage('nose')
    'nose'
    >>> getpackage('nose/plugins')
    'nose.plugins'

    And __init__ files stuck onto directories

    >>> getpackage('nose/plugins/__init__.py')
    'nose.plugins'

    Absolute paths also work.

    >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
    >>> getpackage(path)
    'nose.plugins'
    """
    # NOTE(review): src() appears to map a compiled file back to its .py
    # source (defined elsewhere in this module) -- confirm.
    src_file = src(filename)
    if not src_file.endswith('.py') and not ispackage(src_file):
        return None
    base, ext = os.path.splitext(os.path.basename(src_file))
    if base == '__init__':
        # __init__.py names its containing directory, not itself.
        mod_parts = []
    else:
        mod_parts = [base]
    # Walk upward, collecting enclosing directory names for as long as
    # they are packages.
    path, part = os.path.split(os.path.split(src_file)[0])
    while part:
        if ispackage(os.path.join(path, part)):
            mod_parts.append(part)
        else:
            break
        path, part = os.path.split(path)
    # Parts were collected innermost-first; reverse into dotted order.
    mod_parts.reverse()
    return '.'.join(mod_parts)
+
+
def ln(label):
    """Draw a 70-char-wide divider, with label in the middle.

    >>> ln('hello there')
    '---------------------------- hello there -----------------------------'
    """
    width = 70
    body = ' %s ' % label
    side = (width - len(body)) // 2
    line = '-' * side + body + '-' * side
    # Odd remainders get one extra dash on the right to reach full width.
    return line + '-' * (width - len(line))
+
+
def resolve_name(name, module=None):
    """Resolve a dotted name to a module and its parts. This is stolen
    wholesale from unittest.TestLoader.loadTestByName.

    >>> resolve_name('nose.util') #doctest: +ELLIPSIS
    <module 'nose.util' from...>
    >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
    <function resolve_name at...>
    """
    parts = name.split('.')
    parts_copy = parts[:]
    if module is None:
        # Import the longest importable prefix of the dotted name,
        # dropping trailing segments until one imports; if nothing is
        # left, the last ImportError propagates.
        while parts_copy:
            try:
                log.debug("__import__ %s", name)
                module = __import__('.'.join(parts_copy))
                break
            except ImportError:
                del parts_copy[-1]
                if not parts_copy:
                    raise
        # __import__ returns the top-level package, so attribute
        # traversal starts from the second segment.
        parts = parts[1:]
    obj = module
    log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
    # Walk the remaining segments as attributes.
    for part in parts:
        obj = getattr(obj, part)
    return obj
+
+
+def split_test_name(test):
+    """Split a test name into a 3-tuple containing file, module, and callable
+    names, any of which (but not all) may be blank.
+
+    Test names are in the form:
+
+    file_or_module:callable
+
+    Either side of the : may be dotted. To change the splitting behavior, you
+    can alter nose.util.split_test_re.
+    """
+    norm = os.path.normpath
+    file_or_mod = test
+    fn = None
+    if not ':' in test:
+        # only a file or mod part
+        if file_like(test):
+            return (norm(test), None, None)
+        else:
+            return (None, test, None)
+
+    # could be path|mod:callable, or a : in the file path someplace
+    head, tail = os.path.split(test)
+    if not head:
+        # this is a case like 'foo:bar' -- generally a module
+        # name followed by a callable, but also may be a windows
+        # drive letter followed by a path
+        try:
+            file_or_mod, fn = test.split(':')
+            if file_like(fn):
+                # must be a funny path
+                file_or_mod, fn = test, None
+        except ValueError:
+            # more than one : in the test
+            # this is a case like c:\some\path.py:a_test
+            parts = test.split(':')
+            if len(parts[0]) == 1:
+                file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
+            else:
+                # nonsense like foo:bar:baz
+                raise ValueError("Test name '%s' could not be parsed. Please "
+                                 "format test names as path:callable or "
+                                 "module:callable.")
+    elif not tail:
+        # this is a case like 'foo:bar/'
+        # : must be part of the file path, so ignore it
+        file_or_mod = test
+    else:
+        if ':' in tail:
+            file_part, fn = tail.split(':')
+        else:
+            file_part = tail
+        file_or_mod = os.sep.join([head, file_part])
+    if file_or_mod:
+        if file_like(file_or_mod):
+            return (norm(file_or_mod), None, fn)
+        else:
+            return (None, file_or_mod, fn)
+    else:
+        return (None, None, fn)
+split_test_name.__test__ = False # do not collect
+
+
+def test_address(test):
+    """Find the test address for a test, which may be a module, filename,
+    class, method or function.
+    """
+    if hasattr(test, "address"):
+        return test.address()
+    # type-based polymorphism sucks in general, but I believe is
+    # appropriate here
+    t = type(test)
+    file = module = call = None
+    if t == types.ModuleType:
+        file = getattr(test, '__file__', None)
+        module = getattr(test, '__name__', None)
+        return (src(file), module, call)
+    if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
+        module = getattr(test, '__module__', None)
+        if module is not None:
+            m = sys.modules[module]
+            file = getattr(m, '__file__', None)
+            if file is not None:
+                file = os.path.abspath(file)
+        call = getattr(test, '__name__', None)
+        return (src(file), module, call)
+    if t == types.MethodType:
+        cls_adr = test_address(test.im_class)
+        return (src(cls_adr[0]), cls_adr[1],
+                "%s.%s" % (cls_adr[2], test.__name__))
+    # handle unittest.TestCase instances
+    if isinstance(test, unittest.TestCase):
+        if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
+            or hasattr(test, '_testFunc')):              # 2.7
+            # unittest FunctionTestCase
+            try:
+                return test_address(test._FunctionTestCase__testFunc)
+            except AttributeError:
+                return test_address(test._testFunc)
+        # regular unittest.TestCase
+        cls_adr = test_address(test.__class__)
+        # 2.5 compat: __testMethodName changed to _testMethodName
+        try:
+            method_name = test._TestCase__testMethodName
+        except AttributeError:
+            method_name = test._testMethodName
+        return (src(cls_adr[0]), cls_adr[1],
+                "%s.%s" % (cls_adr[2], method_name))
+    if hasattr(test, '__class__') and test.__class__.__module__ != 'builtins':
+        return test_address(test.__class__)
+    raise TypeError("I don't know what %s is (%s)" % (test, t))
+test_address.__test__ = False # do not collect
+
+
+def try_run(obj, names):
+    """Given a list of possible method names, try to run them with the
+    provided object. Keep going until something works. Used to run
+    setup/teardown methods for module, package, and function tests.
+    """
+    for name in names:
+        func = getattr(obj, name, None)
+        if func is not None:
+            if type(obj) == types.ModuleType:
+                # py.test compatibility
+                try:
+                    args, varargs, varkw, defaults = inspect.getargspec(func)
+                except TypeError:
+                    # Not a function. If it's callable, call it anyway
+                    if hasattr(func, '__call__'):
+                        func = func.__call__
+                    try:
+                        args, varargs, varkw, defaults = \
+                            inspect.getargspec(func)
+                        args.pop(0) # pop the self off
+                    except TypeError:
+                        raise TypeError("Attribute %s of %r is not a python "
+                                        "function. Only functions or callables"
+                                        " may be used as fixtures." %
+                                        (name, obj))
+                if len(args):
+                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
+                    return func(obj)
+            log.debug("call fixture %s.%s", obj, name)
+            return func()
+
+
+def src(filename):
+    """Find the python source file for a .pyc, .pyo or $py.class file on
+    jython. Returns the filename provided if it is not a python source
+    file.
+    """
+    if filename is None:
+        return filename
+    if sys.platform.startswith('java') and filename.endswith('$py.class'):
+        return '.'.join((filename[:-9], 'py'))
+    base, ext = os.path.splitext(filename)
+    if ext in ('.pyc', '.pyo', '.py'):
+        return '.'.join((base, 'py'))
+    return filename
+
+
+def regex_last_key(regex):
+    """Sort key function factory that puts items that match a
+    regular expression last.
+
+    >>> from nose.config import Config
+    >>> from nose.pyversion import sort_list
+    >>> c = Config()
+    >>> regex = c.testMatch
+    >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
+    >>> sort_list(entries, regex_last_key(regex))
+    >>> entries
+    ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
+    """
+    def k(obj):
+        if regex.search(obj):
+            return (1, obj)
+        return (0, obj)
+    return k
+
+
+def tolist(val):
+    """Convert a value that may be a list or a (possibly comma-separated)
+    string into a list. The exception: None is returned as None, not [None].
+
+    >>> tolist(["one", "two"])
+    ['one', 'two']
+    >>> tolist("hello")
+    ['hello']
+    >>> tolist("separate,values, with, commas,  spaces , are    ,ok")
+    ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
+    """
+    if val is None:
+        return None
+    try:
+        # might already be a list
+        val.extend([])
+        return val
+    except AttributeError:
+        pass
+    # might be a string
+    try:
+        return re.split(r'\s*,\s*', val)
+    except TypeError:
+        # who knows...
+        return list(val)
+
+
+class odict(dict):
+    """Simple ordered dict implementation, based on:
+
+    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
+    """
+    def __init__(self, *arg, **kw):
+        self._keys = []
+        super(odict, self).__init__(*arg, **kw)
+
+    def __delitem__(self, key):
+        super(odict, self).__delitem__(key)
+        self._keys.remove(key)
+
+    def __setitem__(self, key, item):
+        super(odict, self).__setitem__(key, item)
+        if key not in self._keys:
+            self._keys.append(key)
+
+    def __str__(self):
+        return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
+
+    def clear(self):
+        super(odict, self).clear()
+        self._keys = []
+
+    def copy(self):
+        d = super(odict, self).copy()
+        d._keys = self._keys[:]
+        return d
+
+    def items(self):
+        return zip(self._keys, self.values())
+
+    def keys(self):
+        return self._keys[:]
+
+    def setdefault(self, key, failobj=None):
+        item = super(odict, self).setdefault(key, failobj)
+        if key not in self._keys:
+            self._keys.append(key)
+        return item
+
+    def update(self, dict):
+        super(odict, self).update(dict)
+        for key in dict.keys():
+            if key not in self._keys:
+                self._keys.append(key)
+
+    def values(self):
+        return map(self.get, self._keys)
+
+
+def transplant_func(func, module):
+    """
+    Make a function imported from module A appear as if it is located
+    in module B.
+
+    >>> from pprint import pprint
+    >>> pprint.__module__
+    'pprint'
+    >>> pp = transplant_func(pprint, __name__)
+    >>> pp.__module__
+    'nose.util'
+
+    The original function is not modified.
+
+    >>> pprint.__module__
+    'pprint'
+
+    Calling the transplanted function calls the original.
+
+    >>> pp([1, 2])
+    [1, 2]
+    >>> pprint([1,2])
+    [1, 2]
+
+    """
+    from nose.tools import make_decorator
+    def newfunc(*arg, **kw):
+        return func(*arg, **kw)
+
+    newfunc = make_decorator(func)(newfunc)
+    newfunc.__module__ = module
+    return newfunc
+
+
+def transplant_class(cls, module):
+    """
+    Make a class appear to reside in `module`, rather than the module in which
+    it is actually defined.
+
+    >>> from nose.failure import Failure
+    >>> Failure.__module__
+    'nose.failure'
+    >>> Nf = transplant_class(Failure, __name__)
+    >>> Nf.__module__
+    'nose.util'
+    >>> Nf.__name__
+    'Failure'
+
+    """
+    class C(cls):
+        pass
+    C.__module__ = module
+    C.__name__ = cls.__name__
+    return C
+
+
+def safe_str(val, encoding='utf-8'):
+    try:
+        return str(val)
+    except UnicodeEncodeError:
+        if isinstance(val, Exception):
+            return ' '.join([safe_str(arg, encoding)
+                             for arg in val])
+        return unicode(val).encode(encoding)
+
+    
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/nosetests.1 b/nosetests.1
new file mode 100644 (file)
index 0000000..aee8c77
--- /dev/null
@@ -0,0 +1,480 @@
+.TH nosetests 1 "2009-04-23" "0.11" "User Commands"
+.SH NAME
+nosetests \- nicer testing for python
+.\" Man page generated from reStructuredText.
+.INDENT 0.0
+.UNINDENT
+
+.SH SYNOPSIS
+.INDENT 0.0
+.INDENT 3.5
+nosetests [options] [names]
+
+.UNINDENT
+.UNINDENT
+
+.SH DESCRIPTION
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression
+(by default: \fI(?:^|[b_.\-])[Tt]est\fP) will be collected as a test (or
+source for collection of tests). In addition, all other packages
+found in the working directory will be examined for python source files
+or directories that match testMatch. Package discovery descends all
+the way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+
+.SS Selecting Tests
+To specify which tests to run, pass test names on the command line:
+
+
+.nf
+nosetests only_test_this.py
+.fi
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+
+.nf
+nosetests test.module
+nosetests another.test:TestCase.test_method
+nosetests a.test:TestCase
+nosetests /path/to/test/file.py:test_function
+.fi
+You may also change the working directory where nose looks for tests
+by using the \-w switch:
+
+
+.nf
+nosetests \-w /path/to/tests
+.fi
+Note, however, that support for multiple \-w arguments is now deprecated
+and will be removed in a future release. As of nose 0.10, you can get
+the same behavior by specifying the target directories \fIwithout\fP
+the \-w switch:
+
+
+.nf
+nosetests /path/to/tests /another/path/to/tests
+.fi
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for
+the additional features (error classes, and plugin\-supplied
+features such as output capture and assert introspection) detailed
+in the options below.
+
+
+.SS Configuration
+In addition to passing command\-line options, you may also put
+configuration options in your project\'s \fIsetup.cfg\fP file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+.ini\-style config files, you put your nosetests configuration in a
+\fB[nosetests]\fP section. Options are the same as on the command line,
+with the \-\- prefix removed. For options that are simple switches, you
+must supply a value:
+
+
+.nf
+[nosetests]
+verbosity=3
+with\-doctest=1
+.fi
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the \fB\-c\fP option.
+
+
+.SS Using Plugins
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is installed,
+run:
+
+
+.nf
+nosetests \-\-plugins
+.fi
+You can add \-v or \-vv to that command to show more information
+about each plugin.
+
+If you are running nose.main() or nose.run() from a script, you
+can specify a list of plugins to use by passing a list of plugins
+with the plugins keyword argument.
+
+
+.SS 0.9 plugins
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it\'s likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+
+.SS 0.10 and 0.11 plugins
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
+
+
+.SS Options
+
+.TP
+\fB\-V\fR\fR\fR, \fB\-\-version\fR\fR
+Output nose version and exit
+
+
+.TP
+\fB\-p\fR\fR\fR, \fB\-\-plugins\fR\fR
+Output list of available plugins and exit. Combine with higher verbosity for greater detail
+
+
+.TP
+\fB\-v\fR\fR=DEFAULT\fR, \fB\-\-verbose\fR\fR=DEFAULT
+Be more verbose. [NOSE_VERBOSE]
+
+
+.TP
+\fB\-\-verbosity\fR\fR=VERBOSITY
+Set verbosity; \-\-verbosity=2 is the same as \-v
+
+
+.TP
+\fB\-q\fR\fR=DEFAULT\fR, \fB\-\-quiet\fR\fR=DEFAULT
+Be less verbose
+
+
+.TP
+\fB\-c\fR\fR=FILES\fR, \fB\-\-config\fR\fR=FILES
+Load configuration from config file(s). May be specified multiple times; in that case, all config files will be loaded and combined
+
+
+.TP
+\fB\-w\fR\fR=WHERE\fR, \fB\-\-where\fR\fR=WHERE
+Look for tests in this directory. May be specified multiple times. The first directory passed will be used as the working directory, in place of the current working directory, which is the default. Others will be added to the list of tests to execute. [NOSE_WHERE]
+
+
+.TP
+\fB\-\-py3where\fR\fR=PY3WHERE
+Look for tests in this directory under Python 3.x. Functions the same as \'where\', but only applies if running under Python 3.x or above.  Note that, if present under 3.x, this option completely replaces any directories specified with \'where\', so the \'where\' option becomes ineffective. [NOSE_PY3WHERE]
+
+
+.TP
+\fB\-m\fR\fR=REGEX\fR, \fB\-\-match\fR\fR=REGEX\fR, \fB\-\-testmatch\fR\fR=REGEX
+Files, directories, function names, and class names that match this regular expression are considered tests.  Default: (?:^|[b_./\-])[Tt]est [NOSE_TESTMATCH]
+
+
+.TP
+\fB\-\-tests\fR\fR=NAMES
+Run these tests (comma\-separated list). This argument is useful mainly from configuration files; on the command line, just pass the tests to run as additional arguments with no switch.
+
+
+.TP
+\fB\-l\fR\fR=DEFAULT\fR, \fB\-\-debug\fR\fR=DEFAULT
+Activate debug logging for one or more systems. Available debug loggers: nose, nose.importer, nose.inspector, nose.plugins, nose.result and nose.selector. Separate multiple names with a comma.
+
+
+.TP
+\fB\-\-debug\-log\fR\fR=FILE
+Log debug messages to this file (default: sys.stderr)
+
+
+.TP
+\fB\-\-logging\-config\fR\fR=FILE\fR, \fB\-\-log\-config\fR\fR=FILE
+Load logging config from this file \-\- bypasses all other logging config settings.
+
+
+.TP
+\fB\-I\fR\fR=REGEX\fR, \fB\-\-ignore\-files\fR\fR=REGEX
+Completely ignore any file that matches this regular expression. Takes precedence over any other settings or plugins. Specifying this option will replace the default setting. Specify this option multiple times to add more regular expressions [NOSE_IGNORE_FILES]
+
+
+.TP
+\fB\-e\fR\fR=REGEX\fR, \fB\-\-exclude\fR\fR=REGEX
+Don\'t run tests that match regular expression [NOSE_EXCLUDE]
+
+
+.TP
+\fB\-i\fR\fR=REGEX\fR, \fB\-\-include\fR\fR=REGEX
+This regular expression will be applied to files, directories, function names, and class names for a chance to include additional tests that do not match TESTMATCH.  Specify this option multiple times to add more regular expressions [NOSE_INCLUDE]
+
+
+.TP
+\fB\-x\fR\fR\fR, \fB\-\-stop\fR\fR
+Stop running tests after the first error or failure
+
+
+.TP
+\fB\-P\fR\fR\fR, \fB\-\-no\-path\-adjustment\fR\fR
+Don\'t make any changes to sys.path when loading tests [NOSE_NOPATH]
+
+
+.TP
+\fB\-\-exe\fR\fR
+Look for tests in python modules that are executable. Normal behavior is to exclude executable modules, since they may not be import\-safe [NOSE_INCLUDE_EXE]
+
+
+.TP
+\fB\-\-noexe\fR\fR
+DO NOT look for tests in python modules that are executable. (The default on the windows platform is to do so.)
+
+
+.TP
+\fB\-\-traverse\-namespace\fR\fR
+Traverse through all path entries of a namespace package
+
+
+.TP
+\fB\-\-first\-package\-wins\fR\fR\fR, \fB\-\-first\-pkg\-wins\fR\fR\fR, \fB\-\-1st\-pkg\-wins\fR\fR
+nose\'s importer will normally evict a package from sys.modules if it sees a package with the same name in a different location. Set this option to disable that behavior.
+
+
+.TP
+\fB\-a\fR\fR=ATTR\fR, \fB\-\-attr\fR\fR=ATTR
+Run only tests that have attributes specified by ATTR [NOSE_ATTR]
+
+
+.TP
+\fB\-A\fR\fR=EXPR\fR, \fB\-\-eval\-attr\fR\fR=EXPR
+Run only tests for whose attributes the Python expression EXPR evaluates to True [NOSE_EVAL_ATTR]
+
+
+.TP
+\fB\-s\fR\fR\fR, \fB\-\-nocapture\fR\fR
+Don\'t capture stdout (any stdout output will be printed immediately) [NOSE_NOCAPTURE]
+
+
+.TP
+\fB\-\-nologcapture\fR\fR
+Disable logging capture plugin. Logging configuration will be left intact. [NOSE_NOLOGCAPTURE]
+
+
+.TP
+\fB\-\-logging\-format\fR\fR=FORMAT
+Specify custom format to print statements. Uses the same format as used by standard logging handlers. [NOSE_LOGFORMAT]
+
+
+.TP
+\fB\-\-logging\-datefmt\fR\fR=FORMAT
+Specify custom date/time format to print statements. Uses the same format as used by standard logging handlers. [NOSE_LOGDATEFMT]
+
+
+.TP
+\fB\-\-logging\-filter\fR\fR=FILTER
+Specify which statements to filter in/out. By default, everything is captured. If the output is too verbose,
+use this option to filter out needless output.
+Example: filter=foo will capture statements issued ONLY to
+ foo or foo.what.ever.sub but not foobar or other logger.
+Specify multiple loggers with comma: filter=foo,bar,baz.
+If any logger name is prefixed with a minus, eg filter=\-foo,
+it will be excluded rather than included. Default: exclude logging messages from nose itself (\-nose). [NOSE_LOGFILTER]
+
+
+.TP
+\fB\-\-logging\-clear\-handlers\fR\fR
+Clear all other logging handlers
+
+
+.TP
+\fB\-\-with\-coverage\fR\fR
+Enable plugin Coverage: 
+Activate a coverage report using Ned Batchelder\'s coverage module.
+ [NOSE_WITH_COVERAGE]
+
+
+.TP
+\fB\-\-cover\-package\fR\fR=PACKAGE
+Restrict coverage output to selected packages [NOSE_COVER_PACKAGE]
+
+
+.TP
+\fB\-\-cover\-erase\fR\fR
+Erase previously collected coverage statistics before run
+
+
+.TP
+\fB\-\-cover\-tests\fR\fR
+Include test modules in coverage report [NOSE_COVER_TESTS]
+
+
+.TP
+\fB\-\-cover\-inclusive\fR\fR
+Include all python files under working directory in coverage report.  Useful for discovering holes in test coverage if not all files are imported by the test suite. [NOSE_COVER_INCLUSIVE]
+
+
+.TP
+\fB\-\-cover\-html\fR\fR
+Produce HTML coverage information
+
+
+.TP
+\fB\-\-cover\-html\-dir\fR\fR=DIR
+Produce HTML coverage information in dir
+
+
+.TP
+\fB\-\-pdb\fR\fR
+Drop into debugger on errors
+
+
+.TP
+\fB\-\-pdb\-failures\fR\fR
+Drop into debugger on failures
+
+
+.TP
+\fB\-\-no\-deprecated\fR\fR
+Disable special handling of DeprecatedTest exceptions.
+
+
+.TP
+\fB\-\-with\-doctest\fR\fR
+Enable plugin Doctest: 
+Activate doctest plugin to find and run doctests in non\-test modules.
+ [NOSE_WITH_DOCTEST]
+
+
+.TP
+\fB\-\-doctest\-tests\fR\fR
+Also look for doctests in test modules. Note that classes, methods and functions should have either doctests or non\-doctest tests, not both. [NOSE_DOCTEST_TESTS]
+
+
+.TP
+\fB\-\-doctest\-extension\fR\fR=EXT
+Also look for doctests in files with this extension [NOSE_DOCTEST_EXTENSION]
+
+
+.TP
+\fB\-\-doctest\-result\-variable\fR\fR=VAR
+Change the variable name set to the result of the last interpreter command from the default \'_\'. Can be used to avoid conflicts with the _() function used for text translation. [NOSE_DOCTEST_RESULT_VAR]
+
+
+.TP
+\fB\-\-doctest\-fixtures\fR\fR=SUFFIX
+Find fixtures for a doctest file in module with this name appended to the base name of the doctest file
+
+
+.TP
+\fB\-\-with\-isolation\fR\fR
+Enable plugin IsolationPlugin: 
+Activate the isolation plugin to isolate changes to external
+modules to a single test module or package. The isolation plugin
+resets the contents of sys.modules after each test module or
+package runs to its state before the test. PLEASE NOTE that this
+plugin should not be used with the coverage plugin, or in any other case
+where module reloading may produce undesirable side\-effects.
+ [NOSE_WITH_ISOLATION]
+
+
+.TP
+\fB\-d\fR\fR\fR, \fB\-\-detailed\-errors\fR\fR\fR, \fB\-\-failure\-detail\fR\fR
+Add detail to error output by attempting to evaluate failed asserts [NOSE_DETAILED_ERRORS]
+
+
+.TP
+\fB\-\-with\-profile\fR\fR
+Enable plugin Profile: 
+Use this plugin to run tests using the hotshot profiler. 
+ [NOSE_WITH_PROFILE]
+
+
+.TP
+\fB\-\-profile\-sort\fR\fR=SORT
+Set sort order for profiler output
+
+
+.TP
+\fB\-\-profile\-stats\-file\fR\fR=FILE
+Profiler stats file; default is a new temp file on each run
+
+
+.TP
+\fB\-\-profile\-restrict\fR\fR=RESTRICT
+Restrict profiler output. See help for pstats.Stats for details
+
+
+.TP
+\fB\-\-no\-skip\fR\fR
+Disable special handling of SkipTest exceptions.
+
+
+.TP
+\fB\-\-with\-id\fR\fR
+Enable plugin TestId: 
+Activate to add a test id (like #1) to each test name output. Activate
+with \-\-failed to rerun failing tests only.
+ [NOSE_WITH_ID]
+
+
+.TP
+\fB\-\-id\-file\fR\fR=FILE
+Store test ids found in test runs in this file. Default is the file .noseids in the working directory.
+
+
+.TP
+\fB\-\-failed\fR\fR
+Run the tests that failed in the last test run.
+
+
+.TP
+\fB\-\-processes\fR\fR=NUM
+Spread test run among this many processes. Set a number equal to the number of processors or cores in your machine for best results. [NOSE_PROCESSES]
+
+
+.TP
+\fB\-\-process\-timeout\fR\fR=SECONDS
+Set timeout for return of results from each test runner process. [NOSE_PROCESS_TIMEOUT]
+
+
+.TP
+\fB\-\-process\-restartworker\fR\fR
+If set, will restart each worker process once their tests are done, this helps control memory leaks from killing the system. [NOSE_PROCESS_RESTARTWORKER]
+
+
+.TP
+\fB\-\-with\-xunit\fR\fR
+Enable plugin Xunit: This plugin provides test results in the standard XUnit XML format. [NOSE_WITH_XUNIT]
+
+
+.TP
+\fB\-\-xunit\-file\fR\fR=FILE
+Path to xml file to store the xunit report in. Default is nosetests.xml in the working directory [NOSE_XUNIT_FILE]
+
+
+.TP
+\fB\-\-all\-modules\fR\fR
+Enable plugin AllModules: Collect tests from all python modules.
+ [NOSE_ALL_MODULES]
+
+
+.TP
+\fB\-\-collect\-only\fR\fR
+Enable collect\-only: 
+Collect and output test names only, don\'t run any tests.
+ [COLLECT_ONLY]
+
+
+.SH AUTHOR
+jpellerin+nose@gmail.com
+
+.SH COPYRIGHT
+LGPL
+
+.\" Generated by docutils manpage writer on 2011-07-30 18:55.
+.\" 
diff --git a/patch.py b/patch.py
new file mode 100644 (file)
index 0000000..981097c
--- /dev/null
+++ b/patch.py
@@ -0,0 +1,639 @@
+""" Patch utility to apply unified diffs
+
+    Brute-force line-by-line non-recursive parsing 
+
+    Copyright (c) 2008-2010 anatoly techtonik
+    Available under the terms of MIT license
+
+    NOTE: This version has been patched by Alex Stewart <alex@foogod.com> for
+    Python 3.x support and other misc fixups.
+
+    Project home: http://code.google.com/p/python-patch/
+
+
+    $Id: patch.py 92 2010-07-02 06:04:57Z techtonik $
+    $HeadURL: http://python-patch.googlecode.com/svn/trunk/patch.py $
+"""
+
+__author__ = "techtonik.rainforce.org"
+__version__ = "10.04-2.pAS1"
+
+import copy
+import logging
+import re
+from logging import debug, info, warning
+import sys
+
+try:
+  # cStringIO doesn't support unicode in 2.5
+  from StringIO import StringIO
+except ImportError:
+  # StringIO has been renamed to 'io' in 3.x
+  from io import StringIO
+
+from os.path import exists, isfile, abspath
+from os import unlink
+
+_open = open
+
+if sys.version_info >= (3,):
+    # Open files with universal newline support but no newline translation (3.x)
+    def open(filename, mode='r'):
+        return _open(filename, mode, newline='')
+else:
+    # Open files with universal newline support but no newline translation (2.x)
+    def open(filename, mode='r'):
+        return _open(filename, mode + 'b')
+
+    # Python 3.x has changed iter.next() to be next(iter) instead, so for
+    # backwards compatibility, we'll just define a next() function under 2.x
+    def next(iter):
+        return iter.next()
+
+
+#------------------------------------------------
+# Logging is controlled by "python_patch" logger
+
+debugmode = False
+
+logger = logging.getLogger("python_patch")
+loghandler = logging.StreamHandler()
+logger.addHandler(loghandler)
+
+debug = logger.debug
+info = logger.info
+warning = logger.warning
+
+# If called as a library, don't log info/debug messages by default.
+logger.setLevel(logging.WARN)
+
+#------------------------------------------------
+
+# constants for patch types
+
+DIFF = PLAIN = "plain"
+HG = MERCURIAL = "mercurial"
+SVN = SUBVERSION = "svn"
+
+
+def fromfile(filename):
+  """ Parse patch file and return Patch() object
+  """
+  info("reading patch from file %s" % filename)
+  fp = open(filename, "r")
+  patch = Patch(fp)
+  fp.close()
+  return patch
+
+
+def fromstring(s):
+  """ Parse text string and return Patch() object
+  """
+  return Patch( StringIO(s) )
+
+
+
+class HunkInfo(object):
+  """ Parsed hunk data container (hunk starts with @@ -R +R @@) """
+
+  def __init__(self):
+    self.startsrc=None #: line count starts with 1
+    self.linessrc=None
+    self.starttgt=None
+    self.linestgt=None
+    self.invalid=False
+    self.text=[]
+
+  def copy(self):
+    return copy.copy(self)
+
+#  def apply(self, estream):
+#    """ write hunk data into enumerable stream
+#        return strings one by one until hunk is
+#        over
+#
+#        enumerable stream are tuples (lineno, line)
+#        where lineno starts with 0
+#    """
+#    pass
+
+
+
+class Patch(object):
+
+  def __init__(self, stream=None):
+
+    # define Patch data members
+    # table with a row for every source file
+
+    #: list of source filenames
+    self.source=None
+    self.target=None
+    #: list of lists of hunks
+    self.hunks=None
+    #: file endings statistics for every hunk
+    self.hunkends=None
+    #: headers for each file
+    self.header=None
+
+    #: patch type - one of constants
+    self.type = None
+
+    if stream:
+      self.parse(stream)
+
+  def copy(self):
+    return copy.copy(self)
+
+  def parse(self, stream):
+    """ parse unified diff """
+    self.header = []
+
+    self.source = []
+    self.target = []
+    self.hunks = []
+    self.hunkends = []
+
+    # define possible file regions that will direct the parser flow
+    headscan  = False # scanning header before the patch body
+    filenames = False # lines starting with --- and +++
+
+    hunkhead = False  # @@ -R +R @@ sequence
+    hunkbody = False  #
+    hunkskip = False  # skipping invalid hunk mode
+
+    headscan = True
+    lineends = dict(lf=0, crlf=0, cr=0)
+    nextfileno = 0
+    nexthunkno = 0    #: even if index starts with 0 user messages number hunks from 1
+
+    # hunkinfo holds parsed values, hunkactual - calculated
+    hunkinfo = HunkInfo()
+    hunkactual = dict(linessrc=None, linestgt=None)
+
+
+    fe = enumerate(stream)
+    for lineno, line in fe:
+
+      # read out header
+      if headscan:
+        header = ''
+        try:
+          while not line.startswith("--- "):
+            header += line
+            lineno, line = next(fe)
+        except StopIteration:
+            # this is actually a loop exit
+            continue
+        self.header.append(header)
+
+        headscan = False
+        # switch to filenames state
+        filenames = True
+
+      # hunkskip and hunkbody code skipped until definition of hunkhead is parsed
+      if hunkbody:
+        # process line first
+        if re.match(r"^[- \+\\]", line):
+            # gather stats about line endings
+            if line.endswith("\r\n"):
+              self.hunkends[nextfileno-1]["crlf"] += 1
+            elif line.endswith("\n"):
+              self.hunkends[nextfileno-1]["lf"] += 1
+            elif line.endswith("\r"):
+              self.hunkends[nextfileno-1]["cr"] += 1
+              
+            if line.startswith("-"):
+              hunkactual["linessrc"] += 1
+            elif line.startswith("+"):
+              hunkactual["linestgt"] += 1
+            elif not line.startswith("\\"):
+              hunkactual["linessrc"] += 1
+              hunkactual["linestgt"] += 1
+            hunkinfo.text.append(line)
+            # todo: handle \ No newline cases
+        else:
+            warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno+1, self.target[nextfileno-1]))
+            # add hunk status node
+            self.hunks[nextfileno-1].append(hunkinfo.copy())
+            self.hunks[nextfileno-1][nexthunkno-1]["invalid"] = True
+            # switch to hunkskip state
+            hunkbody = False
+            hunkskip = True
+
+        # check exit conditions
+        if hunkactual["linessrc"] > hunkinfo.linessrc or hunkactual["linestgt"] > hunkinfo.linestgt:
+            warning("extra hunk no.%d lines at %d for target %s" % (nexthunkno, lineno+1, self.target[nextfileno-1]))
+            # add hunk status node
+            self.hunks[nextfileno-1].append(hunkinfo.copy())
+            self.hunks[nextfileno-1][nexthunkno-1]["invalid"] = True
+            # switch to hunkskip state
+            hunkbody = False
+            hunkskip = True
+        elif hunkinfo.linessrc == hunkactual["linessrc"] and hunkinfo.linestgt == hunkactual["linestgt"]:
+            self.hunks[nextfileno-1].append(hunkinfo.copy())
+            # switch to hunkskip state
+            hunkbody = False
+            hunkskip = True
+
+            # detect mixed window/unix line ends
+            ends = self.hunkends[nextfileno-1]
+            if ((ends["cr"]!=0) + (ends["crlf"]!=0) + (ends["lf"]!=0)) > 1:
+              warning("inconsistent line ends in patch hunks for %s" % self.source[nextfileno-1])
+            if debugmode:
+              debuglines = dict(ends)
+              debuglines.update(file=self.target[nextfileno-1], hunk=nexthunkno)
+              debug("crlf: %(crlf)d  lf: %(lf)d  cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
+
+      if hunkskip:
+        match = re.match("^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?", line)
+        if match:
+          # switch to hunkhead state
+          hunkskip = False
+          hunkhead = True
+        elif line.startswith("--- "):
+          # switch to filenames state
+          hunkskip = False
+          filenames = True
+          if debugmode and len(self.source) > 0:
+            debug("- %2d hunks for %s" % (len(self.hunks[nextfileno-1]), self.source[nextfileno-1]))
+
+      if filenames:
+        if line.startswith("--- "):
+          if nextfileno in self.source:
+            warning("skipping invalid patch for %s" % self.source[nextfileno])
+            del self.source[nextfileno]
+            # double source filename line is encountered
+            # attempt to restart from this second line
+          re_filename = "^--- ([^\t]+)"
+          match = re.match(re_filename, line)
+          # todo: support spaces in filenames
+          if match:
+            self.source.append(match.group(1).strip())
+          else:
+            warning("skipping invalid filename at line %d" % lineno)
+            # switch back to headscan state
+            filenames = False
+            headscan = True
+        elif not line.startswith("+++ "):
+          if nextfileno in self.source:
+            warning("skipping invalid patch with no target for %s" % self.source[nextfileno])
+            del self.source[nextfileno]
+          else:
+            # this should be unreachable
+            warning("skipping invalid target patch")
+          filenames = False
+          headscan = True
+        else:
+          if nextfileno in self.target:
+            warning("skipping invalid patch - double target at line %d" % lineno)
+            del self.source[nextfileno]
+            del self.target[nextfileno]
+            nextfileno -= 1
+            # double target filename line is encountered
+            # switch back to headscan state
+            filenames = False
+            headscan = True
+          else:
+            re_filename = "^\+\+\+ ([^\t]+)"
+            match = re.match(re_filename, line)
+            if not match:
+              warning("skipping invalid patch - no target filename at line %d" % lineno)
+              # switch back to headscan state
+              filenames = False
+              headscan = True
+            else:
+              self.target.append(match.group(1).strip())
+              nextfileno += 1
+              # switch to hunkhead state
+              filenames = False
+              hunkhead = True
+              nexthunkno = 0
+              self.hunks.append([])
+              self.hunkends.append(lineends.copy())
+              continue
+
+      if hunkhead:
+        match = re.match("^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?", line)
+        if not match:
+          if nextfileno-1 not in self.hunks:
+            warning("skipping invalid patch with no hunks for file %s" % self.target[nextfileno-1])
+            # switch to headscan state
+            hunkhead = False
+            headscan = True
+            continue
+          else:
+            # switch to headscan state
+            hunkhead = False
+            headscan = True
+        else:
+          hunkinfo.startsrc = int(match.group(1))
+          hunkinfo.linessrc = 1
+          if match.group(3): hunkinfo.linessrc = int(match.group(3))
+          hunkinfo.starttgt = int(match.group(4))
+          hunkinfo.linestgt = 1
+          if match.group(6): hunkinfo.linestgt = int(match.group(6))
+          hunkinfo.invalid = False
+          hunkinfo.text = []
+
+          hunkactual["linessrc"] = hunkactual["linestgt"] = 0
+
+          # switch to hunkbody state
+          hunkhead = False
+          hunkbody = True
+          nexthunkno += 1
+          continue
+
+    if not hunkskip:
+      warning("patch file incomplete - %s" % filename)
+      # sys.exit(?)
+    else:
+      # duplicated message when an eof is reached
+      if debugmode and len(self.source) > 0:
+          debug("- %2d hunks for %s" % (len(self.hunks[nextfileno-1]), self.source[nextfileno-1]))
+
+    info("total files: %d  total hunks: %d" % (len(self.source), sum([len(hset) for hset in self.hunks])))
+
+
+  def apply(self):
+    """ apply parsed patch """
+
+    total = len(self.source)
+    for fileno, filename in enumerate(self.source):
+
+      f2patch = filename
+      if not exists(f2patch):
+        f2patch = self.target[fileno]
+        if not exists(f2patch):
+          warning("source/target file does not exist\n--- %s\n+++ %s" % (filename, f2patch))
+          continue
+      if not isfile(f2patch):
+        warning("not a file - %s" % f2patch)
+        continue
+      filename = f2patch
+
+      info("processing %d/%d:\t %s" % (fileno+1, total, filename))
+
+      # validate before patching
+      f2fp = open(filename)
+      hunkno = 0
+      hunk = self.hunks[fileno][hunkno]
+      hunkfind = []
+      hunkreplace = []
+      validhunks = 0
+      canpatch = False
+      for lineno, line in enumerate(f2fp):
+        if lineno+1 < hunk.startsrc:
+          continue
+        elif lineno+1 == hunk.startsrc:
+          hunkfind = [x[1:].rstrip("\r\n") for x in hunk.text if x[0] in " -"]
+          hunkreplace = [x[1:].rstrip("\r\n") for x in hunk.text if x[0] in " +"]
+          #pprint(hunkreplace)
+          hunklineno = 0
+
+          # todo \ No newline at end of file
+
+        # check hunks in source file
+        if lineno+1 < hunk.startsrc+len(hunkfind)-1:
+          if line.rstrip("\r\n") == hunkfind[hunklineno]:
+            hunklineno+=1
+          else:
+            debug("hunk no.%d doesn't match source file %s" % (hunkno+1, filename))
+            # file may be already patched, but we will check other hunks anyway
+            hunkno += 1
+            if hunkno < len(self.hunks[fileno]):
+              hunk = self.hunks[fileno][hunkno]
+              continue
+            else:
+              break
+
+        # check if processed line is the last line
+        if lineno+1 == hunk.startsrc+len(hunkfind)-1:
+          debug("file %s hunk no.%d -- is ready to be patched" % (filename, hunkno+1))
+          hunkno+=1
+          validhunks+=1
+          if hunkno < len(self.hunks[fileno]):
+            hunk = self.hunks[fileno][hunkno]
+          else:
+            if validhunks == len(self.hunks[fileno]):
+              # patch file
+              canpatch = True
+              break
+      else:
+        if hunkno < len(self.hunks[fileno]):
+          warning("premature end of source file %s at hunk %d" % (filename, hunkno+1))
+
+      f2fp.close()
+
+      if validhunks < len(self.hunks[fileno]):
+        if self._match_file_hunks(filename, self.hunks[fileno]):
+          warning("already patched  %s" % filename)
+        else:
+          warning("source file is different - %s" % filename)
+      if canpatch:
+        backupname = filename+".orig"
+        if exists(backupname):
+          warning("can't backup original file to %s - aborting" % backupname)
+        else:
+          import shutil
+          shutil.move(filename, backupname)
+          if self.write_hunks(backupname, filename, self.hunks[fileno]):
+            info("successfully patched %s" % filename)
+            unlink(backupname)
+          else:
+            warning("error patching file %s" % filename)
+            shutil.copy(filename, filename+".invalid")
+            warning("invalid version is saved to %s" % filename+".invalid")
+            # todo: proper rejects
+            shutil.move(backupname, filename)
+
+    # todo: check for premature eof
+
+
+  def can_patch(self, filename):
+    """ Check if specified filename can be patched. Returns None if file can
+    not be found among source filenames. False if patch can not be applied
+    cleanly. True otherwise.
+
+    :returns: True, False or None
+    """
+    idx = self._get_file_idx(filename, source=True)
+    if idx == None:
+      return None
+    return self._match_file_hunks(filename, self.hunks[idx])
+    
+
+  def _match_file_hunks(self, filepath, hunks):
+    matched = True
+    fp = open(abspath(filepath))
+
+    class NoMatch(Exception):
+      pass
+
+    lineno = 1
+    line = fp.readline()
+    hno = None
+    try:
+      for hno, h in enumerate(hunks):
+        # skip to first line of the hunk
+        while lineno < h.starttgt:
+          if not len(line): # eof
+            debug("check failed - premature eof before hunk: %d" % (hno+1))
+            raise NoMatch
+          line = fp.readline()
+          lineno += 1
+        for hline in h.text:
+          if hline.startswith("-"):
+            continue
+          if not len(line):
+            debug("check failed - premature eof on hunk: %d" % (hno+1))
+            # todo: \ No newline at the end of file
+            raise NoMatch
+          if line.rstrip("\r\n") != hline[1:].rstrip("\r\n"):
+            debug("file is not patched - failed hunk: %d" % (hno+1))
+            raise NoMatch
+          line = fp.readline()
+          lineno += 1
+
+    except NoMatch:
+      matched = False
+      # todo: display failed hunk, i.e. expected/found
+
+    fp.close()
+    return matched
+
+
+  def patch_stream(self, instream, hunks):
+    """ Generator that yields stream patched with hunks iterable
+    
+        Converts lineends in hunk lines to the best suitable format
+        autodetected from input
+    """
+
+    # todo: At the moment substituted lineends may not be the same
+    #       at the start and at the end of patching. Also issue a
+    #       warning/throw about mixed lineends (is it really needed?)
+
+    hunks = iter(hunks)
+
+    srclineno = 1
+
+    lineends = {'\n':0, '\r\n':0, '\r':0}
+    def get_line():
+      """
+      local utility function - return line from source stream
+      collecting line end statistics on the way
+      """
+      line = instream.readline()
+      # note: 'U' (universal newline) mode works only with text files
+      if line.endswith("\r\n"):
+        lineends["\r\n"] += 1
+      elif line.endswith("\n"):
+        lineends["\n"] += 1
+      elif line.endswith("\r"):
+        lineends["\r"] += 1
+      return line
+
+    for hno, h in enumerate(hunks):
+      debug("hunk %d" % (hno+1))
+      # skip to line just before hunk starts
+      while srclineno < h.startsrc:
+        yield get_line()
+        srclineno += 1
+
+      for hline in h.text:
+        # todo: check \ No newline at the end of file
+        if hline.startswith("-") or hline.startswith("\\"):
+          get_line()
+          srclineno += 1
+          continue
+        else:
+          if not hline.startswith("+"):
+            get_line()
+            srclineno += 1
+          line2write = hline[1:]
+          # detect if line ends are consistent in source file
+          if sum([bool(lineends[x]) for x in lineends]) == 1:
+            newline = [x for x in lineends if lineends[x] != 0][0]
+            yield line2write.rstrip("\r\n")+newline
+          else: # newlines are mixed
+            yield line2write
+     
+    for line in instream:
+      yield line
+
+
+  def write_hunks(self, srcname, tgtname, hunks):
+    src = open(srcname, "r")
+    tgt = open(tgtname, "w")
+
+    debug("processing target file %s" % tgtname)
+
+    tgt.writelines(self.patch_stream(src, hunks))
+
+    tgt.close()
+    src.close()
+    return True
+  
+
+  def _get_file_idx(self, filename, source=None):
+    """ Detect index of given filename within patch.
+
+        :param filename:
+        :param source: search filename among sources (True),
+                       targets (False), or both (None)
+        :returns: int or None
+    """
+    filename = abspath(filename)
+    if source == True or source == None:
+      for i,fnm in enumerate(self.source):
+        if filename == abspath(fnm):
+          return i  
+    if source == False or source == None:
+      for i,fnm in enumerate(self.target):
+        if filename == abspath(fnm):
+          return i  
+
+
+
+
+if __name__ == "__main__":
+  from optparse import OptionParser
+  from os.path import exists
+  import sys
+
+  opt = OptionParser(usage="%prog [options] unipatch-file", version="python-patch %s" % __version__)
+  opt.add_option("-d", "--debug", action="store_true", dest="debugmode", help="Print debugging messages")
+  opt.add_option("-q", "--quiet", action="store_true", dest="quiet", help="Only print messages on warning/error")
+  (options, args) = opt.parse_args()
+
+  if not args:
+    opt.print_version()
+    opt.print_help()
+    sys.exit()
+  debugmode = options.debugmode
+  patchfile = args[0]
+  if not exists(patchfile) or not isfile(patchfile):
+    sys.exit("patch file does not exist - %s" % patchfile)
+
+
+  if debugmode:
+    loglevel = logging.DEBUG
+    logformat = "%(levelname)8s %(message)s"
+  elif options.quiet:
+    loglevel = logging.WARN
+    logformat = "%(message)s"
+  else:
+    loglevel = logging.INFO
+    logformat = "%(message)s"
+  logger.setLevel(loglevel)
+  loghandler.setFormatter(logging.Formatter(logformat))
+
+
+
+  patch = fromfile(patchfile)
+  #pprint(patch)
+  patch.apply()
+
+  # todo: document and test line ends handling logic - patch.py detects proper line-endings
+  #       for inserted hunks and issues a warning if patched file has inconsistent line ends
diff --git a/selftest.py b/selftest.py
new file mode 100755 (executable)
index 0000000..b07d71f
--- /dev/null
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+"""Test the copy of nose in this directory, by running that nose against itself.
+
+You can test nose using nose in other ways, but if you don't use this script,
+you might have one installation of nose testing another installation, which is
+not supported.
+"""
+
+# More detail:
+
+# In the absence of some sort of deep renaming magic, nose can't reasonably
+# test a different installation of itself, given the existence of the global
+# module registry sys.modules .
+
+# If installed system-wide with setuptools, setuptools (via the site-packages
+# easy-install.pth) takes you at your word and ensures that the installed nose
+# comes first on sys.path .  So the only way to test a copy of nose other than
+# the installed one is to install that version (e.g. by running python setup.py
+# develop).
+
+# This script provides a way of running nose on nose's own tests without
+# installing the version to be tested, nor uninstalling the currently-installed
+# version.
+
+import glob
+import os
+import sys
+
+
+if __name__ == "__main__":
+    this_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
+    lib_dirs = [this_dir]
+    test_dir = this_dir
+    if sys.version_info >= (3,):
+        # Under Python 3.x, we need to 'build' the source (using 2to3, etc)
+        # first.  'python3 setup.py build_tests' will put everything under
+        # build/tests (including nose itself, since some tests are inside the
+        # nose source)
+        # The 'py3where' argument in setup.cfg will take care of making sure we
+        # pull our tests only from the build/tests directory.  We just need to
+        # make sure the right things are on sys.path.
+        lib_dirs = glob.glob(os.path.join(this_dir, 'build', 'lib*'))
+        test_dir = os.path.join(this_dir, 'build', 'tests')
+        if not os.path.isdir(test_dir):
+            raise AssertionError("Error: %s does not exist.  Use the setup.py 'build_tests' command to create it." % (test_dir,))
+    try:
+        import pkg_resources
+        env = pkg_resources.Environment(search_path=lib_dirs)
+        distributions = env["nose"]
+        assert len(distributions) == 1, (
+                "Incorrect usage of selftest.py; please see DEVELOPERS.txt")
+        dist = distributions[0]
+        dist.activate()
+    except ImportError:
+        pass
+    # Always make sure our chosen test dir is first on the path
+    sys.path.insert(0, test_dir)
+    import nose
+    nose.run_exit()
diff --git a/setup.cfg b/setup.cfg
new file mode 100644 (file)
index 0000000..790cd44
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,11 @@
+[nosetests]
+with-doctest=1
+doctest-extension=.rst
+doctest-fixtures=_fixtures
+py3where=build/tests
+
+[bdist_rpm]
+doc_files = man/man1/nosetests.1 README.txt
+;; Uncomment if your platform automatically gzips man pages
+;; See README.BDIST_RPM
+;; install_script = install-rpm.sh
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..d8615c3
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,121 @@
+import sys
+import os
+
+VERSION = '1.1.2'
+py_vers_tag = '-%s.%s' % sys.version_info[:2]
+
+test_dirs = ['functional_tests', 'unit_tests', os.path.join('doc','doc_tests'), 'nose']
+
+if sys.version_info >= (3,):
+    try:
+        import setuptools
+    except ImportError:
+        from distribute_setup import use_setuptools
+        use_setuptools()
+
+    extra = {'use_2to3': True,
+             'test_dirs': test_dirs,
+             'test_build_dir': 'build/tests',
+             'pyversion_patching': True,
+             }
+else:
+    extra = {}
+
+try:
+    from setup3lib import setup
+    from setuptools import find_packages
+    addl_args = dict(
+        zip_safe = False,
+        packages = find_packages(),
+        entry_points = {
+        'console_scripts': [
+            'nosetests = nose:run_exit',
+            'nosetests%s = nose:run_exit' % py_vers_tag,
+            ],
+        'distutils.commands': [
+            ' nosetests = nose.commands:nosetests',
+            ],
+        },
+        test_suite = 'nose.collector',
+        )
+    addl_args.update(extra)
+
+    # This is required by multiprocess plugin; on Windows, if
+    # the launch script is not import-safe, spawned processes
+    # will re-run it, resulting in an infinite loop.
+    if sys.platform == 'win32':
+        import re
+        from setuptools.command.easy_install import easy_install
+
+        def wrap_write_script(self, script_name, contents, *arg, **kwarg):
+            bad_text = re.compile(
+                "\n"
+                "sys.exit\(\n"
+                "   load_entry_point\(([^\)]+)\)\(\)\n"
+                "\)\n")
+            good_text = (
+                "\n"
+                "if __name__ == '__main__':\n"
+                "    sys.exit(\n"
+                r"        load_entry_point(\1)()\n"
+                "    )\n"
+                )
+            contents = bad_text.sub(good_text, contents)
+            return self._write_script(script_name, contents, *arg, **kwarg)
+        easy_install._write_script = easy_install.write_script
+        easy_install.write_script = wrap_write_script
+
+except ImportError:
+    from distutils.core import setup
+    addl_args = dict(
+        packages = ['nose', 'nose.ext', 'nose.plugins', 'nose.sphinx'],
+        scripts = ['bin/nosetests'],
+        )
+
+setup(
+    name = 'nose',
+    version = VERSION,
+    author = 'Jason Pellerin',
+    author_email = 'jpellerin+nose@gmail.com',
+    description = ('nose extends unittest to make testing easier'),
+    long_description = \
+    """nose extends the test loading and running features of unittest, making
+    it easier to write, find and run tests.
+
+    By default, nose will run tests in files or directories under the current
+    working directory whose names include "test" or "Test" at a word boundary
+    (like "test_this" or "functional_test" or "TestClass" but not
+    "libtest"). Test output is similar to that of unittest, but also includes
+    captured stdout output from failing tests, for easy print-style debugging.
+
+    These features, and many more, are customizable through the use of
+    plugins. Plugins included with nose provide support for doctest, code
+    coverage and profiling, flexible attribute-based test selection,
+    output capture and more. More information about writing plugins may be
+    found on in the nose API documentation, here:
+    http://somethingaboutorange.com/mrl/projects/nose/
+
+    If you have recently reported a bug marked as fixed, or have a craving for
+    the very latest, you may want the unstable development version instead:
+    http://bitbucket.org/jpellerin/nose/get/tip.gz#egg=nose-dev
+    """,
+    license = 'GNU LGPL',
+    keywords = 'test unittest doctest automatic discovery',
+    url = 'http://readthedocs.org/docs/nose/',
+    data_files = [('man/man1', ['nosetests.1'])],
+    package_data = {'': ['*.txt',
+                         'examples/*.py',
+                         'examples/*/*.py']},
+    classifiers = [
+        'Development Status :: 4 - Beta',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+        'Natural Language :: English',
+        'Operating System :: OS Independent',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 3',
+        'Topic :: Software Development :: Testing'
+        ],
+    **addl_args
+    )
+
diff --git a/setup3lib.py b/setup3lib.py
new file mode 100644 (file)
index 0000000..27bdb93
--- /dev/null
@@ -0,0 +1,140 @@
+import sys
+from setuptools import setup as _setup
+
+py3_args = ['use_2to3', 'convert_2to3_doctests', 'use_2to3_fixers', 'test_dirs', 'test_build_dir', 'doctest_exts', 'pyversion_patching']
+
+if sys.version_info < (3,):
+    # Remove any Python-3.x-only arguments (so they don't generate complaints
+    # from 2.x setuptools) and then just pass through to the regular setup
+    # routine.
+    def setup(*args, **kwargs):
+        for a in py3_args:
+            if a in kwargs:
+                del kwargs[a]
+        return _setup(*args, **kwargs)
+else:
+    import os
+    import re
+    import logging
+    from setuptools import Distribution as _Distribution
+    from distutils.core import Command
+    from setuptools.command.build_py import Mixin2to3
+    from distutils import dir_util, file_util, log
+    import setuptools.command.test
+    from pkg_resources import normalize_path
+    try:
+        import patch
+        patch.logger.setLevel(logging.WARN)
+    except ImportError:
+        patch = None
+
+    patchfile_re = re.compile(r'(.*)\.py([0-9.]+)\.patch$')
+
+    def pyversion_patch(filename):
+        '''Find the best pyversion-fixup patch for a given filename and apply
+           it.
+        '''
+        dir, file = os.path.split(filename)
+        best_ver = (0,)
+        patchfile = None
+        for dirfile in os.listdir(dir):
+            m = patchfile_re.match(dirfile)
+            if not m:
+                continue
+            base, ver = m.groups()
+            if base != file:
+                continue
+            ver = tuple([int(v) for v in ver.split('.')])
+            if sys.version_info >= ver and ver > best_ver:
+                best_ver = ver
+                patchfile = dirfile
+        if not patchfile:
+            return False
+        log.info("Applying %s to %s..." % (patchfile, filename))
+        cwd = os.getcwd()
+        os.chdir(dir)
+        try:
+            p = patch.fromfile(patchfile)
+            p.apply()
+        finally:
+            os.chdir(cwd)
+        return True
+
+    class Distribution (_Distribution):
+        def __init__(self, attrs=None):
+            self.test_dirs = []
+            self.test_build_dir = None
+            self.doctest_exts = ['.py', '.rst']
+            self.pyversion_patching = False
+            _Distribution.__init__(self, attrs)
+
+    class BuildTestsCommand (Command, Mixin2to3):
+        # Create mirror copy of tests, convert all .py files using 2to3
+        user_options = []
+
+        def initialize_options(self):
+            self.test_base = None
+
+        def finalize_options(self):
+            test_base = self.distribution.test_build_dir
+            if not test_base:
+                bcmd = self.get_finalized_command('build')
+                test_base = bcmd.build_base
+            self.test_base = test_base
+
+        def run(self):
+            use_2to3 = getattr(self.distribution, 'use_2to3', False)
+            test_dirs = getattr(self.distribution, 'test_dirs', [])
+            test_base = self.test_base
+            bpy_cmd = self.get_finalized_command("build_py")
+            lib_base = normalize_path(bpy_cmd.build_lib)
+            modified = []
+            py_modified = []
+            doc_modified = []
+            dir_util.mkpath(test_base)
+            for testdir in test_dirs:
+              for srcdir, dirnames, filenames in os.walk(testdir):
+                destdir = os.path.join(test_base, srcdir)
+                dir_util.mkpath(destdir)
+                for fn in filenames:
+                    if fn.startswith("."):
+                        # Skip .svn folders and such
+                        continue
+                    dstfile, copied = file_util.copy_file(
+                                          os.path.join(srcdir, fn),
+                                          os.path.join(destdir, fn),
+                                          update=True)
+                    if copied:
+                        modified.append(dstfile)
+                        if fn.endswith('.py'):
+                            py_modified.append(dstfile)
+                        for ext in self.distribution.doctest_exts:
+                            if fn.endswith(ext):
+                                doc_modified.append(dstfile)
+                                break
+            if use_2to3:
+                self.run_2to3(py_modified)
+                self.run_2to3(doc_modified, True)
+            if self.distribution.pyversion_patching:
+                if patch is not None:
+                    for file in modified:
+                        pyversion_patch(file)
+                else:
+                    log.warn("Warning: pyversion_patching specified in setup config but patch module not found.  Patching will not be performed.")
+
+            dir_util.mkpath(lib_base)
+            self.reinitialize_command('egg_info', egg_base=lib_base)
+            self.run_command('egg_info')
+
+    class TestCommand (setuptools.command.test.test):
+        # Override 'test' command to make sure 'build_tests' gets run first.
+        def run(self):
+            self.run_command('build_tests')
+            setuptools.command.test.test.run(self)
+
+    def setup(*args, **kwargs):
+        kwargs.setdefault('distclass', Distribution)
+        cmdclass = kwargs.setdefault('cmdclass', {})
+        cmdclass.setdefault('build_tests', BuildTestsCommand)
+        cmdclass.setdefault('test', TestCommand)
+        return _setup(*args, **kwargs)
diff --git a/unit_tests/helpers$py.class b/unit_tests/helpers$py.class
new file mode 100644 (file)
index 0000000..b2e4431
Binary files /dev/null and b/unit_tests/helpers$py.class differ
diff --git a/unit_tests/helpers.py b/unit_tests/helpers.py
new file mode 100644 (file)
index 0000000..0a5d68d
--- /dev/null
@@ -0,0 +1,6 @@
+def iter_compat(suite):
+    try:
+        suite.__iter__
+        return suite
+    except AttributeError:
+        return suite._tests
diff --git a/unit_tests/helpers.pyc b/unit_tests/helpers.pyc
new file mode 100644 (file)
index 0000000..d952a01
Binary files /dev/null and b/unit_tests/helpers.pyc differ
diff --git a/unit_tests/mock$py.class b/unit_tests/mock$py.class
new file mode 100644 (file)
index 0000000..8111d9f
Binary files /dev/null and b/unit_tests/mock$py.class differ
diff --git a/unit_tests/mock.py b/unit_tests/mock.py
new file mode 100644 (file)
index 0000000..98e7d43
--- /dev/null
@@ -0,0 +1,107 @@
+import imp
+import sys
+from nose.config import Config
+from nose import proxy
+from nose.plugins.manager import NoPlugins
+from nose.util import odict
+
+
+def mod(name):
+    m = imp.new_module(name)
+    sys.modules[name] = m
+    return m
+    
+class ResultProxyFactory:
+    def __call__(self, result, test):
+        return ResultProxy(result, test)
+
+
+class ResultProxy(proxy.ResultProxy):
+    called = []
+    def __init__(self, result, test):
+        self.result = result
+        self.test = test
+    def afterTest(self, test):
+        self.assertMyTest(test)
+        self.called.append(('afterTest', test))
+    def beforeTest(self, test):
+        self.assertMyTest(test)
+        self.called.append(('beforeTest', test))
+    def startTest(self, test):
+        print "proxy startTest"
+        self.assertMyTest(test)
+        self.called.append(('startTest', test))
+    def stopTest(self, test):
+        print "proxy stopTest"
+        self.assertMyTest(test)
+        self.called.append(('stopTest', test))
+    def addDeprecated(self, test, err):
+        print "proxy addDeprecated"
+        self.assertMyTest(test)
+        self.called.append(('addDeprecated', test, err))
+    def addError(self, test, err):
+        print "proxy addError"
+        self.assertMyTest(test)
+        self.called.append(('addError', test, err))
+    def addFailure(self, test, err):
+        print "proxy addFailure"
+        self.assertMyTest(test)
+        self.called.append(('addFailure', test, err))
+    def addSkip(self, test, err):
+        print "proxy addSkip"
+        self.assertMyTest(test)
+        self.called.append(('addSkip', test, err))
+    def addSuccess(self, test):
+        self.assertMyTest(test)
+        self.called.append(('addSuccess', test))
+    
+
+class RecordingPluginManager(object):
+
+    def __init__(self):
+        self.reset()
+
+    def __getattr__(self, call):
+        return RecordingPluginProxy(self, call)
+
+    def null_call(self, call, *arg, **kw):
+        return getattr(self._nullPluginManager, call)(*arg, **kw)
+
+    def reset(self):
+        self._nullPluginManager = NoPlugins()
+        self.called = odict()
+
+    def calls(self):
+        return self.called.keys()
+
+
+class RecordingPluginProxy(object):
+
+    def __init__(self, manager, call):
+        self.man = manager
+        self.call = call
+
+    def __call__(self, *arg, **kw):
+        self.man.called.setdefault(self.call, []).append((arg, kw))
+        return self.man.null_call(self.call, *arg, **kw)
+
+
+class Bucket(object):
+    def __init__(self, **kw):
+        self.__dict__['d'] = {}
+        self.__dict__['d'].update(kw)
+        
+    def __getattr__(self, attr):
+        if not self.__dict__.has_key('d'):
+            return None
+        return self.__dict__['d'].get(attr)
+
+    def __setattr__(self, attr, val):        
+        self.d[attr] = val
+
+
+class MockOptParser(object):
+    def __init__(self):
+        self.opts = []
+    def add_option(self, *args, **kw):
+        self.opts.append((args, kw))
diff --git a/unit_tests/mock.pyc b/unit_tests/mock.pyc
new file mode 100644 (file)
index 0000000..b21df0a
Binary files /dev/null and b/unit_tests/mock.pyc differ
diff --git a/unit_tests/support/bug101/tests.py b/unit_tests/support/bug101/tests.py
new file mode 100644 (file)
index 0000000..5d7c3e3
--- /dev/null
@@ -0,0 +1,9 @@
+def my_decor(func):
+    return lambda: func()
+
+def test_decor():
+    pass
+
+def test_decor1():
+    pass
+test_decor1 = my_decor(test_decor1)
diff --git a/unit_tests/support/bug105/tests$py.class b/unit_tests/support/bug105/tests$py.class
new file mode 100644 (file)
index 0000000..b486085
Binary files /dev/null and b/unit_tests/support/bug105/tests$py.class differ
diff --git a/unit_tests/support/bug105/tests.py b/unit_tests/support/bug105/tests.py
new file mode 100644 (file)
index 0000000..63a368b
--- /dev/null
@@ -0,0 +1,49 @@
+from nose import tools
+
+def test_z():
+    """(1) test z"""
+    pass
+
+def test_a():
+    """(2) test a"""
+    pass
+
+def test_rz():
+    """(3) Test with raises decorator"""
+    raise TypeError("err")
+test_rz = tools.raises(TypeError)(test_rz)
+
+def decorate(func):
+    func.attr = 1
+    return func
+
+def dec_replace(func):
+    def newfunc():
+        func()
+        pass
+    return newfunc
+
+def dec_makedecorator(func):
+    def newfunc():
+        pass
+    newfunc = tools.make_decorator(func)(newfunc)
+    return newfunc
+
+def test_dz():
+    """(4) Test with non-replacing decorator"""
+    pass
+test_dz = decorate(test_dz)
+
+def test_rz():
+    """(5) Test with replacing decorator"""
+    pass
+test_rz = dec_replace(test_rz)
+
+def test_mdz():
+    """(6) Test with make_decorator decorator"""
+    pass
+test_mdz = dec_makedecorator(test_mdz)
+
+def test_b():
+    """(7) test b"""
+    pass
diff --git a/unit_tests/support/bug105/tests.pyc b/unit_tests/support/bug105/tests.pyc
new file mode 100644 (file)
index 0000000..8956736
Binary files /dev/null and b/unit_tests/support/bug105/tests.pyc differ
diff --git a/unit_tests/support/config_defaults/a.cfg b/unit_tests/support/config_defaults/a.cfg
new file mode 100644 (file)
index 0000000..4bc5e22
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+verbosity = 3
diff --git a/unit_tests/support/config_defaults/b.cfg b/unit_tests/support/config_defaults/b.cfg
new file mode 100644 (file)
index 0000000..e329464
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+verbosity = 5
diff --git a/unit_tests/support/config_defaults/invalid.cfg b/unit_tests/support/config_defaults/invalid.cfg
new file mode 100644 (file)
index 0000000..34b6a0c
--- /dev/null
@@ -0,0 +1 @@
+spam
diff --git a/unit_tests/support/config_defaults/invalid_value.cfg b/unit_tests/support/config_defaults/invalid_value.cfg
new file mode 100644 (file)
index 0000000..bc05d74
--- /dev/null
@@ -0,0 +1,2 @@
+[nosetests]
+verbosity = spam
diff --git a/unit_tests/support/doctest/err_doctests$py.class b/unit_tests/support/doctest/err_doctests$py.class
new file mode 100644 (file)
index 0000000..c087b71
Binary files /dev/null and b/unit_tests/support/doctest/err_doctests$py.class differ
diff --git a/unit_tests/support/doctest/err_doctests.py b/unit_tests/support/doctest/err_doctests.py
new file mode 100644 (file)
index 0000000..6d60696
--- /dev/null
@@ -0,0 +1,12 @@
+"""
+Module with errors in doctest formatting.
+
+    >>> 1
+    'this is\n an error'
+"""
+def foo():
+    pass
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/unit_tests/support/doctest/err_doctests.pyc b/unit_tests/support/doctest/err_doctests.pyc
new file mode 100644 (file)
index 0000000..9b86402
Binary files /dev/null and b/unit_tests/support/doctest/err_doctests.pyc differ
diff --git a/unit_tests/support/doctest/no_doctests$py.class b/unit_tests/support/doctest/no_doctests$py.class
new file mode 100644 (file)
index 0000000..6f3c244
Binary files /dev/null and b/unit_tests/support/doctest/no_doctests$py.class differ
diff --git a/unit_tests/support/doctest/no_doctests.py b/unit_tests/support/doctest/no_doctests.py
new file mode 100644 (file)
index 0000000..7e3750e
--- /dev/null
@@ -0,0 +1,9 @@
+"""
+Module without doctests.
+"""
+def foo():
+    pass
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/unit_tests/support/doctest/no_doctests.pyc b/unit_tests/support/doctest/no_doctests.pyc
new file mode 100644 (file)
index 0000000..84fa6cf
Binary files /dev/null and b/unit_tests/support/doctest/no_doctests.pyc differ
diff --git a/unit_tests/support/foo/__init__$py.class b/unit_tests/support/foo/__init__$py.class
new file mode 100644 (file)
index 0000000..acc8100
Binary files /dev/null and b/unit_tests/support/foo/__init__$py.class differ
diff --git a/unit_tests/support/foo/__init__.py b/unit_tests/support/foo/__init__.py
new file mode 100644 (file)
index 0000000..66e0a5e
--- /dev/null
@@ -0,0 +1,7 @@
+boodle = True
+
+def somefunc():
+    """This is a doctest in somefunc.
+    >>> 'a'
+    'a'
+    """
diff --git a/unit_tests/support/foo/__init__.pyc b/unit_tests/support/foo/__init__.pyc
new file mode 100644 (file)
index 0000000..a1ea891
Binary files /dev/null and b/unit_tests/support/foo/__init__.pyc differ
diff --git a/unit_tests/support/foo/bar/__init__$py.class b/unit_tests/support/foo/bar/__init__$py.class
new file mode 100644 (file)
index 0000000..1af5241
Binary files /dev/null and b/unit_tests/support/foo/bar/__init__$py.class differ
diff --git a/unit_tests/support/foo/bar/__init__.py b/unit_tests/support/foo/bar/__init__.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/unit_tests/support/foo/bar/__init__.pyc b/unit_tests/support/foo/bar/__init__.pyc
new file mode 100644 (file)
index 0000000..eb29a09
Binary files /dev/null and b/unit_tests/support/foo/bar/__init__.pyc differ
diff --git a/unit_tests/support/foo/bar/buz$py.class b/unit_tests/support/foo/bar/buz$py.class
new file mode 100644 (file)
index 0000000..dfe7605
Binary files /dev/null and b/unit_tests/support/foo/bar/buz$py.class differ
diff --git a/unit_tests/support/foo/bar/buz.py b/unit_tests/support/foo/bar/buz.py
new file mode 100644 (file)
index 0000000..48c886d
--- /dev/null
@@ -0,0 +1,8 @@
+from foo import boodle
+
+def afunc():
+    """This is a doctest
+    >>> 2 + 3
+    5
+    """
+    pass
diff --git a/unit_tests/support/foo/bar/buz.pyc b/unit_tests/support/foo/bar/buz.pyc
new file mode 100644 (file)
index 0000000..faf6f3a
Binary files /dev/null and b/unit_tests/support/foo/bar/buz.pyc differ
diff --git a/unit_tests/support/foo/doctests.txt b/unit_tests/support/foo/doctests.txt
new file mode 100644 (file)
index 0000000..e4b8d5b
--- /dev/null
@@ -0,0 +1,7 @@
+Doctests in a text file.
+
+    >>> 1 + 2
+    3
+
+    >>> ['a', 'b'] + ['c']
+    ['a', 'b', 'c']
diff --git a/unit_tests/support/foo/test_foo.py b/unit_tests/support/foo/test_foo.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/unit_tests/support/foo/tests/dir_test_file.py b/unit_tests/support/foo/tests/dir_test_file.py
new file mode 100644 (file)
index 0000000..79b86ec
--- /dev/null
@@ -0,0 +1,3 @@
+# test file in test dir in a package
+def test_foo():
+    pass
diff --git a/unit_tests/support/issue006/tests$py.class b/unit_tests/support/issue006/tests$py.class
new file mode 100644 (file)
index 0000000..ca42201
Binary files /dev/null and b/unit_tests/support/issue006/tests$py.class differ
diff --git a/unit_tests/support/issue006/tests.py b/unit_tests/support/issue006/tests.py
new file mode 100644 (file)
index 0000000..5c8ee60
--- /dev/null
@@ -0,0 +1,19 @@
+class Test1(object):
+    def test_nested_generator(self):
+        def func():
+            pass
+        yield func,
+
+    def test_nested_generator_mult(self):
+        def f2(a):
+            pass
+        for b in range(1, 4):
+            yield f2, b
+
+    def try_something(self, a):
+        pass
+
+    def test_normal_generator(self):
+        yield self.try_something, 1
+        yield 'try_something', 2
+        
diff --git a/unit_tests/support/issue006/tests.pyc b/unit_tests/support/issue006/tests.pyc
new file mode 100644 (file)
index 0000000..30a7b9c
Binary files /dev/null and b/unit_tests/support/issue006/tests.pyc differ
diff --git a/unit_tests/support/issue065/tests$py.class b/unit_tests/support/issue065/tests$py.class
new file mode 100644 (file)
index 0000000..6771803
Binary files /dev/null and b/unit_tests/support/issue065/tests$py.class differ
diff --git a/unit_tests/support/issue065/tests.py b/unit_tests/support/issue065/tests.py
new file mode 100644 (file)
index 0000000..d246458
--- /dev/null
@@ -0,0 +1,5 @@
+class D(dict):
+    def __getattr__(self, k):
+        return dict.__getitem__(self, k)
+
+test = D()
diff --git a/unit_tests/support/issue065/tests.pyc b/unit_tests/support/issue065/tests.pyc
new file mode 100644 (file)
index 0000000..bb645c8
Binary files /dev/null and b/unit_tests/support/issue065/tests.pyc differ
diff --git a/unit_tests/support/issue270/__init__.py b/unit_tests/support/issue270/__init__.py
new file mode 100644 (file)
index 0000000..264b0f9
--- /dev/null
@@ -0,0 +1,2 @@
+def setup():
+    pass
diff --git a/unit_tests/support/issue270/__init__.pyc b/unit_tests/support/issue270/__init__.pyc
new file mode 100644 (file)
index 0000000..2183483
Binary files /dev/null and b/unit_tests/support/issue270/__init__.pyc differ
diff --git a/unit_tests/support/issue270/foo_test.py b/unit_tests/support/issue270/foo_test.py
new file mode 100644 (file)
index 0000000..5a629d3
--- /dev/null
@@ -0,0 +1,7 @@
+class Foo_Test:
+    
+    def test_foo(self):
+        pass
+
+    def test_bar(self):
+        pass
diff --git a/unit_tests/support/issue270/foo_test.pyc b/unit_tests/support/issue270/foo_test.pyc
new file mode 100644 (file)
index 0000000..9b21f1f
Binary files /dev/null and b/unit_tests/support/issue270/foo_test.pyc differ
diff --git a/unit_tests/support/other/file.txt b/unit_tests/support/other/file.txt
new file mode 100644 (file)
index 0000000..792d600
--- /dev/null
@@ -0,0 +1 @@
+#
diff --git a/unit_tests/support/pkgorg/lib/modernity.py b/unit_tests/support/pkgorg/lib/modernity.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/unit_tests/support/pkgorg/tests/test_mod.py b/unit_tests/support/pkgorg/tests/test_mod.py
new file mode 100644 (file)
index 0000000..2516258
--- /dev/null
@@ -0,0 +1,4 @@
+import modernity
+
+def test():
+    pass
diff --git a/unit_tests/support/script.py b/unit_tests/support/script.py
new file mode 100755 (executable)
index 0000000..9e33d77
--- /dev/null
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+
+print "FAIL"
diff --git a/unit_tests/support/test-dir/test.py b/unit_tests/support/test-dir/test.py
new file mode 100644 (file)
index 0000000..2ae2839
--- /dev/null
@@ -0,0 +1 @@
+pass
diff --git a/unit_tests/support/test.py b/unit_tests/support/test.py
new file mode 100644 (file)
index 0000000..9ad04e0
--- /dev/null
@@ -0,0 +1,13 @@
+import unittest
+
+class Something(unittest.TestCase):
+    def test_something(self):
+        pass
+
+class TestTwo:
+
+    def __repr__(self):
+        return 'TestTwo'
+    
+    def test_whatever(self):
+        pass
diff --git a/unit_tests/test_attribute_plugin$py.class b/unit_tests/test_attribute_plugin$py.class
new file mode 100644 (file)
index 0000000..61d1937
Binary files /dev/null and b/unit_tests/test_attribute_plugin$py.class differ
diff --git a/unit_tests/test_attribute_plugin.py b/unit_tests/test_attribute_plugin.py
new file mode 100644 (file)
index 0000000..0df0e99
--- /dev/null
@@ -0,0 +1,53 @@
+# There are more attribute plugin unit tests in unit_tests/test_plugins.py
+from nose.tools import eq_
+from nose.plugins.attrib import attr
+
+def test_flags():
+    # @attr('one','two')
+    def test():
+        pass
+    test = attr('one','two')(test)
+    
+    eq_(test.one, 1)
+    eq_(test.two, 1)
+
+def test_values():
+    # @attr(mood="hohum", colors=['red','blue'])
+    def test():
+        pass
+    test = attr(mood="hohum", colors=['red','blue'])(test)
+    
+    eq_(test.mood, "hohum")
+    eq_(test.colors, ['red','blue'])
+
+def test_mixed():
+    # @attr('slow', 'net', role='integration')
+    def test():
+        pass
+    test = attr('slow', 'net', role='integration')(test)
+    
+    eq_(test.slow, 1)
+    eq_(test.net, 1)
+    eq_(test.role, 'integration')
+
+def test_class_attrs():
+    # @attr('slow', 'net', role='integration')
+    class MyTest:
+        def setUp():
+            pass
+        def test_one(self):
+            pass
+        def test_two(self):
+            pass
+
+    class SubClass(MyTest):
+        pass
+
+    MyTest = attr('slow', 'net', role='integration')(MyTest)
+    eq_(MyTest.slow, 1)
+    eq_(MyTest.net, 1)
+    eq_(MyTest.role, 'integration')
+    eq_(SubClass.slow, 1)
+
+    assert not hasattr(MyTest.setUp, 'slow')
+    assert not hasattr(MyTest.test_two, 'slow')
diff --git a/unit_tests/test_attribute_plugin.pyc b/unit_tests/test_attribute_plugin.pyc
new file mode 100644 (file)
index 0000000..72ac361
Binary files /dev/null and b/unit_tests/test_attribute_plugin.pyc differ
diff --git a/unit_tests/test_bug105$py.class b/unit_tests/test_bug105$py.class
new file mode 100644 (file)
index 0000000..b315d04
Binary files /dev/null and b/unit_tests/test_bug105$py.class differ
diff --git a/unit_tests/test_bug105.py b/unit_tests/test_bug105.py
new file mode 100644 (file)
index 0000000..e0362a8
--- /dev/null
@@ -0,0 +1,32 @@
+import os
+import unittest
+
+class TestBug105(unittest.TestCase):
+
+    def test_load_in_def_order(self):
+        from nose.loader import TestLoader
+
+        where = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             'support', 'bug105'))
+
+        l = TestLoader()
+        testmod = l.loadTestsFromDir(where).next()
+        print testmod
+        testmod.setUp()
+
+        def fix(t):
+            s = str(t)
+            if ': ' in s:
+                return s[s.index(': ')+2:]
+            return s
+        
+        tests = map(fix, testmod)
+        print tests
+        self.assertEqual(tests, ['tests.test_z', 'tests.test_a',
+                                 'tests.test_dz', 'tests.test_mdz',
+                                 'tests.test_b'])
+
+
+if __name__ == '__main__':
+    unittest.main()
+        
diff --git a/unit_tests/test_bug105.pyc b/unit_tests/test_bug105.pyc
new file mode 100644 (file)
index 0000000..9831360
Binary files /dev/null and b/unit_tests/test_bug105.pyc differ
diff --git a/unit_tests/test_capture_plugin$py.class b/unit_tests/test_capture_plugin$py.class
new file mode 100644 (file)
index 0000000..e0f6971
Binary files /dev/null and b/unit_tests/test_capture_plugin$py.class differ
diff --git a/unit_tests/test_capture_plugin.py b/unit_tests/test_capture_plugin.py
new file mode 100644 (file)
index 0000000..2f721f0
--- /dev/null
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+import sys
+import unittest
+from optparse import OptionParser
+from nose.config import Config
+from nose.plugins.capture import Capture
+
+class TestCapturePlugin(unittest.TestCase):
+
+    def setUp(self):
+        self._stdout = sys.stdout
+
+    def tearDown(self):
+        sys.stdout = self._stdout
+
+    def test_enabled_by_default(self):
+        c = Capture()
+        assert c.enabled
+
+    def test_can_be_disabled(self):
+        c = Capture()
+        parser = OptionParser()
+        c.addOptions(parser)
+        options, args = parser.parse_args(['test_can_be_disabled',
+                                           '-s'])
+        c.configure(options, Config())
+        assert not c.enabled
+
+        c = Capture()
+        options, args = parser.parse_args(['test_can_be_disabled_long',
+                                           '--nocapture'])
+        c.configure(options, Config())
+        assert not c.enabled
+
+        env = {'NOSE_NOCAPTURE': 1}
+        c = Capture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['test_can_be_disabled'])
+        c.configure(options, Config())
+        assert not c.enabled
+
+        c = Capture()
+        parser = OptionParser()
+        c.addOptions(parser)
+        
+        options, args = parser.parse_args(['test_can_be_disabled'])
+        c.configure(options, Config())
+        assert c.enabled
+        
+    def test_captures_stdout(self):
+        c = Capture()
+        c.start()
+        print "Hello"
+        c.end()
+        self.assertEqual(c.buffer, "Hello\n")
+        
+    def test_captures_nonascii_stdout(self):
+        c = Capture()
+        c.start()
+        print "test 日本"
+        c.end()
+        self.assertEqual(c.buffer, "test 日本\n")
+
+    def test_format_error(self):
+        class Dummy:
+            pass
+        d = Dummy()
+        c = Capture()
+        c.start()
+        try:
+            print "Oh my!"
+            raise Exception("boom")
+        except:
+            err = sys.exc_info()
+        formatted = c.formatError(d, err)
+        ec, ev, tb = err
+        (fec, fev, ftb) = formatted
+        # print fec, fev, ftb
+        
+        self.assertEqual(ec, fec)
+        self.assertEqual(tb, ftb)
+        assert 'Oh my!' in fev, "Output not found in error message"
+        assert 'Oh my!' in d.capturedOutput, "Output not attached to test"
+
+    def test_format_nonascii_error(self):
+        class Dummy:
+            pass
+        d = Dummy()
+        c = Capture()
+        c.start()
+        try:
+            print "debug 日本"
+            raise AssertionError(u'response does not contain 名')
+        except:
+            err = sys.exc_info()
+        formatted = c.formatError(d, err)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_capture_plugin.pyc b/unit_tests/test_capture_plugin.pyc
new file mode 100644 (file)
index 0000000..7ea31e7
Binary files /dev/null and b/unit_tests/test_capture_plugin.pyc differ
diff --git a/unit_tests/test_cases$py.class b/unit_tests/test_cases$py.class
new file mode 100644 (file)
index 0000000..92fda91
Binary files /dev/null and b/unit_tests/test_cases$py.class differ
diff --git a/unit_tests/test_cases.py b/unit_tests/test_cases.py
new file mode 100644 (file)
index 0000000..af399e5
--- /dev/null
@@ -0,0 +1,274 @@
+import unittest
+import pdb
+import sys
+import nose.case
+import nose.failure
+from nose.pyversion import unbound_method
+from nose.config import Config
+from mock import ResultProxyFactory, ResultProxy
+
+class TestNoseCases(unittest.TestCase):
+
+    def test_function_test_case(self):
+        res = unittest.TestResult()
+        
+        a = []
+        def func(a=a):
+            a.append(1)
+
+        case = nose.case.FunctionTestCase(func)
+        case(res)
+        assert a[0] == 1
+
+    def test_method_test_case(self):
+        res = unittest.TestResult()
+
+        a = []
+        class TestClass(object):
+            def test_func(self, a=a):
+                a.append(1)
+
+        case = nose.case.MethodTestCase(unbound_method(TestClass,
+                                                       TestClass.test_func))
+        case(res)
+        assert a[0] == 1
+
+    def test_method_test_case_with_metaclass(self):
+        res = unittest.TestResult()
+        
+        class TestType(type):
+            def __new__(cls, name, bases, dct):
+                return type.__new__(cls, name, bases, dct)
+        a = []
+        class TestClass(object):
+            __metaclass__ = TestType
+            def test_func(self, a=a):
+                a.append(1)
+
+        case = nose.case.MethodTestCase(unbound_method(TestClass,
+                                                       TestClass.test_func))
+        case(res)
+        assert a[0] == 1
+
+    def test_method_test_case_fixtures(self):        
+        res = unittest.TestResult()
+        called = []
+        class TestClass(object):
+            def setup(self):
+                called.append('setup')
+            def teardown(self):
+                called.append('teardown')
+            def test_func(self):
+                called.append('test')
+
+        case = nose.case.MethodTestCase(unbound_method(TestClass,
+                                                       TestClass.test_func))
+        case(res)
+        self.assertEqual(called, ['setup', 'test', 'teardown'])
+
+        class TestClassFailingSetup(TestClass):
+            def setup(self):
+                called.append('setup')
+                raise Exception("failed")
+        called[:] = []
+        case = nose.case.MethodTestCase(unbound_method(TestClassFailingSetup,
+                                            TestClassFailingSetup.test_func))
+        case(res)
+        self.assertEqual(called, ['setup'])        
+
+        class TestClassFailingTest(TestClass):
+            def test_func(self):
+                called.append('test')
+                raise Exception("failed")
+            
+        called[:] = []
+        case = nose.case.MethodTestCase(unbound_method(TestClassFailingTest,
+                                            TestClassFailingTest.test_func))
+        case(res)
+        self.assertEqual(called, ['setup', 'test', 'teardown'])     
+        
+    def test_function_test_case_fixtures(self):
+        from nose.tools import with_setup
+        res = unittest.TestResult()
+
+        called = {}
+
+        def st():
+            called['st'] = True
+        def td():
+            called['td'] = True
+
+        def func_exc():
+            called['func'] = True
+            raise TypeError("An exception")
+
+        func_exc = with_setup(st, td)(func_exc)
+        case = nose.case.FunctionTestCase(func_exc)
+        case(res)
+        assert 'st' in called
+        assert 'func' in called
+        assert 'td' in called
+
+    def test_failure_case(self):
+        res = unittest.TestResult()
+        f = nose.failure.Failure(ValueError, "No such test spam")
+        f(res)
+        assert res.errors
+
+
+class TestNoseTestWrapper(unittest.TestCase):
+    def test_case_fixtures_called(self):
+        """Instance fixtures are properly called for wrapped tests"""
+        res = unittest.TestResult()
+        called = []
+                        
+        class TC(unittest.TestCase):
+            def setUp(self):
+                print "TC setUp %s" % self
+                called.append('setUp')
+            def runTest(self):
+                print "TC runTest %s" % self
+                called.append('runTest')
+            def tearDown(self):
+                print "TC tearDown %s" % self
+                called.append('tearDown')
+
+        case = nose.case.Test(TC())
+        case(res)
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        self.assertEqual(called, ['setUp', 'runTest', 'tearDown'])
+
+    def test_result_proxy_used(self):
+        """A result proxy is used to wrap the result for all tests"""
+        class TC(unittest.TestCase):
+            def runTest(self):
+                raise Exception("error")
+            
+        ResultProxy.called[:] = []
+        res = unittest.TestResult()
+        config = Config()
+        case = nose.case.Test(TC(), config=config,
+                              resultProxy=ResultProxyFactory())
+
+        case(res)
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+
+        calls = [ c[0] for c in ResultProxy.called ]
+        self.assertEqual(calls, ['beforeTest', 'startTest', 'addError',
+                                 'stopTest', 'afterTest'])
+
+    def test_address(self):
+        from nose.util import absfile, src
+        class TC(unittest.TestCase):
+            def runTest(self):
+                raise Exception("error")
+
+        def dummy(i):
+            pass
+
+        def test():
+            pass
+
+        class Test:
+            def test(self):
+                pass
+
+            def test_gen(self):
+                def tryit(i):
+                    pass
+                for i in range (0, 2):
+                    yield tryit, i
+
+            def try_something(self, a, b):
+                pass
+
+        fl = src(absfile(__file__))
+        case = nose.case.Test(TC())
+        self.assertEqual(case.address(), (fl, __name__, 'TC.runTest'))
+
+        case = nose.case.Test(nose.case.FunctionTestCase(test))
+        self.assertEqual(case.address(), (fl, __name__, 'test'))
+
+        case = nose.case.Test(nose.case.FunctionTestCase(
+            dummy, arg=(1,), descriptor=test))
+        self.assertEqual(case.address(), (fl, __name__, 'test'))
+
+        case = nose.case.Test(nose.case.MethodTestCase(
+                                  unbound_method(Test, Test.test)))
+        self.assertEqual(case.address(), (fl, __name__, 'Test.test'))
+
+        case = nose.case.Test(
+            nose.case.MethodTestCase(unbound_method(Test, Test.try_something),
+                                     arg=(1,2,),
+                                     descriptor=unbound_method(Test,
+                                                               Test.test_gen)))
+        self.assertEqual(case.address(),
+                         (fl, __name__, 'Test.test_gen'))
+
+        case = nose.case.Test(
+            nose.case.MethodTestCase(unbound_method(Test, Test.test_gen),
+                                     test=dummy, arg=(1,)))
+        self.assertEqual(case.address(),
+                         (fl, __name__, 'Test.test_gen'))
+
+    def test_context(self):
+        class TC(unittest.TestCase):
+            def runTest(self):
+                pass
+        def test():
+            pass
+
+        class Test:
+            def test(self):
+                pass
+
+        case = nose.case.Test(TC())
+        self.assertEqual(case.context, TC)
+
+        case = nose.case.Test(nose.case.FunctionTestCase(test))
+        self.assertEqual(case.context, sys.modules[__name__])
+
+        case = nose.case.Test(nose.case.MethodTestCase(unbound_method(Test,
+                                                           Test.test)))
+        self.assertEqual(case.context, Test)
+
+    def test_short_description(self):
+        class TC(unittest.TestCase):
+            def test_a(self):
+                """
+                This is the description
+                """
+                pass
+
+            def test_b(self):
+                """This is the description
+                """
+                pass
+
+            def test_c(self):
+                pass
+
+        case_a = nose.case.Test(TC('test_a'))
+        case_b = nose.case.Test(TC('test_b'))
+        case_c = nose.case.Test(TC('test_c'))
+
+        assert case_a.shortDescription().endswith("This is the description")
+        assert case_b.shortDescription().endswith("This is the description")
+        assert case_c.shortDescription() in (None, # pre 2.7
+                                             'test_c (test_cases.TC)') # 2.7
+
+    def test_unrepresentable_shortDescription(self):
+        class TC(unittest.TestCase):
+            def __str__(self):
+                # see issue 422
+                raise ValueError('simulate some mistake in this code')
+            def runTest(self):
+                pass
+
+        case = nose.case.Test(TC())
+        self.assertEqual(case.shortDescription(), None)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_cases.pyc b/unit_tests/test_cases.pyc
new file mode 100644 (file)
index 0000000..f5d5a46
Binary files /dev/null and b/unit_tests/test_cases.pyc differ
diff --git a/unit_tests/test_config$py.class b/unit_tests/test_config$py.class
new file mode 100644 (file)
index 0000000..67818a2
Binary files /dev/null and b/unit_tests/test_config$py.class differ
diff --git a/unit_tests/test_config.py b/unit_tests/test_config.py
new file mode 100644 (file)
index 0000000..b58b054
--- /dev/null
@@ -0,0 +1,141 @@
+import re
+import os
+import tempfile
+import unittest
+import warnings
+import pickle
+import sys
+
+import nose.config
+from nose.plugins.manager import DefaultPluginManager
+from nose.plugins.skip import SkipTest
+from nose.plugins.prof import Profile
+
+
+class TestNoseConfig(unittest.TestCase):
+
+    def test_defaults(self):
+        c = nose.config.Config()
+        assert c.addPaths == True
+        # FIXME etc
+
+    def test_reset(self):
+        c = nose.config.Config()
+        c.include = 'include'
+        assert c.include == 'include'
+        c.reset()
+        assert c.include is None
+
+    def test_update(self):
+        c = nose.config.Config()
+        c.update({'exclude':'x'})
+        assert c.exclude == 'x'
+
+    def test_ignore_files_default(self):
+        """
+        The default configuration should have several ignore file settings.
+        """
+        c = nose.config.Config()
+        c.configure(['program'])
+        self.assertEqual(len(c.ignoreFiles), 3)
+    
+    def test_ignore_files_single(self):
+        """A single ignore-files flag should override the default settings.""" 
+        c = nose.config.Config()
+        c.configure(['program', '--ignore-files=a'])
+        self.assertEqual(len(c.ignoreFiles), 1)
+        aMatcher = c.ignoreFiles[0]
+        assert aMatcher.match('a')
+        assert not aMatcher.match('b')
+    
+    def test_ignore_files_multiple(self):
+        """
+        Multiple ignore-files flags should be appended together, overriding
+        the default settings.
+        """
+        c = nose.config.Config()
+        c.configure(['program', '--ignore-files=a', '-Ib'])
+        self.assertEqual(len(c.ignoreFiles), 2)
+        aMatcher, bMatcher = c.ignoreFiles
+        assert aMatcher.match('a')
+        assert not aMatcher.match('b')
+        assert bMatcher.match('b')
+        assert not bMatcher.match('a')
+    
+    def test_multiple_include(self):
+        c = nose.config.Config()
+        c.configure(['program', '--include=a', '--include=b'])
+        self.assertEqual(len(c.include), 2)
+        a, b = c.include
+        assert a.match('a')
+        assert not a.match('b')
+        assert b.match('b')
+        assert not b.match('a')
+
+    def test_single_include(self):
+        c = nose.config.Config()
+        c.configure(['program', '--include=b'])
+        self.assertEqual(len(c.include), 1)
+        b = c.include[0]
+        assert b.match('b')
+        assert not b.match('a')
+
+    def test_plugins(self):
+        c = nose.config.Config()
+        assert c.plugins
+        c.plugins.begin()
+
+    def test_testnames(self):
+        c = nose.config.Config()
+        c.configure(['program', 'foo', 'bar', 'baz.buz.biz'])
+        self.assertEqual(c.testNames, ['foo', 'bar', 'baz.buz.biz'])
+
+        c = nose.config.Config(testNames=['foo'])
+        c.configure([])
+        self.assertEqual(c.testNames, ['foo'])
+
+    def test_where(self):
+        # we don't need to see our own warnings
+        warnings.filterwarnings(action='ignore',
+                                category=DeprecationWarning,
+                                module='nose.config')
+
+        here = os.path.dirname(__file__)
+        support = os.path.join(here, 'support')
+        foo = os.path.abspath(os.path.join(support, 'foo'))
+        c = nose.config.Config()
+        c.configure(['program', '-w', foo, '-w', 'bar'])
+        self.assertEqual(c.workingDir, foo)
+        self.assertEqual(c.testNames, ['bar'])
+
+    def test_progname_looks_like_option(self):
+        # issue #184
+        c = nose.config.Config()
+        # the -v here is the program name, not an option
+        # this matters eg. with python -c "import nose; nose.main()"
+        c.configure(['-v', 'mytests'])
+        self.assertEqual(c.verbosity, 1)
+
+    def test_pickle_empty(self):
+        c = nose.config.Config()
+        cp = pickle.dumps(c)
+        cc = pickle.loads(cp)
+
+    def test_pickle_configured(self):
+        if 'java' in sys.version.lower():
+            raise SkipTest("jython has no profiler plugin")
+        c = nose.config.Config(plugins=DefaultPluginManager())
+        config_args = ['--with-doctest', '--with-coverage', 
+                     '--with-id', '--attr=A', '--collect', '--all',
+                     '--with-isolation', '-d', '--with-xunit', '--processes=2',
+                     '--pdb']
+        if Profile.available():
+            config_args.append('--with-profile')
+        c.configure(config_args)
+        cp = pickle.dumps(c)
+        cc = pickle.loads(cp)
+        assert cc.plugins._plugins
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_config.pyc b/unit_tests/test_config.pyc
new file mode 100644 (file)
index 0000000..b2bf379
Binary files /dev/null and b/unit_tests/test_config.pyc differ
diff --git a/unit_tests/test_config_defaults.rst b/unit_tests/test_config_defaults.rst
new file mode 100644 (file)
index 0000000..944d370
--- /dev/null
@@ -0,0 +1,146 @@
+    >>> from optparse import OptionParser
+    >>> import os
+    >>> from cStringIO import StringIO
+
+    >>> import nose.config
+
+All commandline options fall back to values configured in
+configuration files.  The configuration lives in a single section
+("nosetests") in each configuration file.
+
+    >>> support = os.path.join(os.path.dirname(__file__), "support",
+    ...                        "config_defaults")
+
+    >>> def error(msg):
+    ...     print "error: %s" % msg
+
+    >>> def get_parser():
+    ...     parser = OptionParser()
+    ...     parser.add_option(
+    ...         "-v", "--verbose",
+    ...         action="count", dest="verbosity",
+    ...         default=1)
+    ...     parser.add_option(
+    ...         "--verbosity", action="store", dest="verbosity",
+    ...         type="int")
+    ...     return nose.config.ConfiguredDefaultsOptionParser(parser,
+    ...                                                       "nosetests",
+    ...                                                       error)
+
+    >>> def parse(args, config_files):
+    ...     argv = ["nosetests"] + list(args)
+    ...     return get_parser().parseArgsAndConfigFiles(argv, config_files)
+
+
+Options on the command line combine with the defaults from the config
+files and the options' own defaults (here, -v adds 1 to verbosity of 3
+from a.cfg).  Config file defaults take precedence over options'
+defaults.
+
+    >>> options, args = parse([], [])
+    >>> options.verbosity
+    1
+    >>> options, args = parse([], os.path.join(support, "a.cfg"))
+    >>> options.verbosity
+    3
+    >>> options, args = parse(["-v"], os.path.join(support, "a.cfg"))
+    >>> options.verbosity
+    4
+
+Command line arguments take precedence
+
+    >>> options, args = parse(["--verbosity=7"], os.path.join(support, "a.cfg"))
+    >>> options.verbosity
+    7
+
+Where options appear in several config files, the last config file wins
+
+    >>> files = [os.path.join(support, "b.cfg"), os.path.join(support, "a.cfg")]
+    >>> options, args = parse([], files)
+    >>> options.verbosity
+    3
+
+
+Invalid values should cause an error specifically about configuration
+files (not about a commandline option)
+
+    >>> options, arguments = parse([], StringIO("""\
+    ... [nosetests]
+    ... verbosity = spam
+    ... """))
+    error: Error reading config file '<???>': option 'verbosity': invalid integer value: 'spam'
+
+Unrecognised option in nosetests config section
+
+    >>> options, args = parse([], StringIO("[nosetests]\nspam=eggs\n"))
+    error: Error reading config file '<???>': no such option 'spam'
+
+If there were multiple config files, the error message tells us which
+file contains the bad option name or value
+
+    >>> options, args = parse([], [os.path.join(support, "a.cfg"),
+    ...                            os.path.join(support, "invalid_value.cfg"),
+    ...                            os.path.join(support, "b.cfg")])
+    ... # doctest: +ELLIPSIS
+    error: Error reading config file '...invalid_value.cfg': option 'verbosity': invalid integer value: 'spam'
+
+
+Invalid config files
+
+(file-like object)
+
+    >>> options, args = parse([], StringIO("spam"))
+    error: Error reading config file '<???>': File contains no section headers.
+    file: <???>, line: 1
+    'spam'
+
+(filename)
+
+    >>> options, args = parse([], os.path.join(support, "invalid.cfg"))
+    ... # doctest: +ELLIPSIS
+    error: Error reading config file '...invalid.cfg': File contains no section headers.
+    file: ...invalid.cfg, line: 1
+    'spam\n'
+
+(filenames, length == 1)
+
+    >>> options, args = parse([], [os.path.join(support, "invalid.cfg")])
+    ... # doctest: +ELLIPSIS
+    error: Error reading config file '...invalid.cfg': File contains no section headers.
+    file: ...invalid.cfg, line: 1
+    'spam\n'
+
+(filenames, length > 1)
+
+If there were multiple config files, the error message tells us which
+file is bad
+
+    >>> options, args = parse([], [os.path.join(support, "a.cfg"),
+    ...                            os.path.join(support, "invalid.cfg"),
+    ...                            os.path.join(support, "b.cfg")])
+    ... # doctest: +ELLIPSIS
+    error: Error reading config file '...invalid.cfg': File contains no section headers.
+    file: ...invalid.cfg, line: 1
+    'spam\n'
+
+
+Missing config files don't deserve an error or warning
+
+(filename)
+
+    >>> options, args = parse([], os.path.join(support, "nonexistent.cfg"))
+    >>> print options.__dict__
+    {'verbosity': 1}
+
+(filenames)
+
+    >>> options, args = parse([], [os.path.join(support, "nonexistent.cfg")])
+    >>> print options.__dict__
+    {'verbosity': 1}
+
+
+The same goes for missing config file section ("nosetests")
+
+    >>> options, args = parse([], StringIO("[spam]\nfoo=bar\n"))
+    >>> print options.__dict__
+    {'verbosity': 1}
diff --git a/unit_tests/test_core$py.class b/unit_tests/test_core$py.class
new file mode 100644 (file)
index 0000000..3fd7cb6
Binary files /dev/null and b/unit_tests/test_core$py.class differ
diff --git a/unit_tests/test_core.py b/unit_tests/test_core.py
new file mode 100644 (file)
index 0000000..6fb6cef
--- /dev/null
@@ -0,0 +1,96 @@
+import os
+import sys
+import unittest
+from cStringIO import StringIO
+from optparse import OptionParser
+import nose.core
+from nose.config import Config
+from nose.tools import set_trace
+from mock import Bucket, MockOptParser
+
+
+class NullLoader:
+    def loadTestsFromNames(self, names):
+        return unittest.TestSuite()
+
+class TestAPI_run(unittest.TestCase):
+
+    def test_restore_stdout(self):
+        print "AHOY"
+        s = StringIO()
+        print s
+        stdout = sys.stdout
+        conf = Config(stream=s)
+        # set_trace()
+        print "About to run"
+        res = nose.core.run(
+            testLoader=NullLoader(), argv=['test_run'], env={}, config=conf)
+        print "Done running"
+        stdout_after = sys.stdout
+        self.assertEqual(stdout, stdout_after)
+
+class Undefined(object):
+    pass
+
+class TestUsage(unittest.TestCase):
+    
+    def test_from_directory(self):
+        usage_txt = nose.core.TestProgram.usage()
+        assert usage_txt.startswith('nose collects tests automatically'), (
+                "Unexpected usage: '%s...'" % usage_txt[0:50].replace("\n", '\n'))
+    
+    def test_from_zip(self):
+        requested_data = []
+        
+        # simulates importing nose from a zip archive
+        # with a zipimport.zipimporter instance
+        class fake_zipimporter(object):
+            
+            prefix = ''
+            zipfile = '<fake zipfile>'
+            
+            def get_data(self, path):
+                requested_data.append(path)
+                return "<usage>"
+                    
+        existing_loader = getattr(nose, '__loader__', Undefined)
+        try:
+            nose.__loader__ = fake_zipimporter()
+            usage_txt = nose.core.TestProgram.usage()
+            self.assertEqual(usage_txt, '<usage>')
+            self.assertEqual(requested_data, ['nose%susage.txt' % os.sep])
+        finally:
+            if existing_loader is not Undefined:
+                nose.__loader__ = existing_loader
+            else:
+                del nose.__loader__
+    
+    def test_from_zip_with_prefix(self):
+        requested_data = []
+        
+        # simulates importing nose from a zip archive
+        # with a zipimport.zipimporter instance
+        class fake_zipimporter(object):
+            
+            prefix = 'PREFIX'
+            zipfile = '<fake zipfile>'
+            
+            def get_data(self, path):
+                requested_data.append(path)
+                return "<usage>"
+                
+        existing_loader = getattr(nose, '__loader__', Undefined)
+        try:            
+            nose.__loader__ = fake_zipimporter()
+            usage_txt = nose.core.TestProgram.usage()
+            self.assertEqual(usage_txt, '<usage>')
+            self.assertEqual(requested_data, 
+                             ['PREFIX%snose%susage.txt' % (os.sep, os.sep)])
+        finally:
+            if existing_loader is not Undefined:
+                nose.__loader__ = existing_loader
+            else:
+                del nose.__loader__
+        
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_core.pyc b/unit_tests/test_core.pyc
new file mode 100644 (file)
index 0000000..f1db5b5
Binary files /dev/null and b/unit_tests/test_core.pyc differ
diff --git a/unit_tests/test_deprecated_plugin$py.class b/unit_tests/test_deprecated_plugin$py.class
new file mode 100644 (file)
index 0000000..1a50a09
Binary files /dev/null and b/unit_tests/test_deprecated_plugin$py.class differ
diff --git a/unit_tests/test_deprecated_plugin.py b/unit_tests/test_deprecated_plugin.py
new file mode 100644 (file)
index 0000000..6c62481
--- /dev/null
@@ -0,0 +1,131 @@
+import unittest
+from nose.config import Config
+from nose.plugins.deprecated import Deprecated, DeprecatedTest
+from nose.result import TextTestResult, _TextTestResult
+from StringIO import StringIO
+from optparse import OptionParser
+try:
+    # 2.7+
+    from unittest.runner import _WritelnDecorator
+except ImportError:
+    from unittest import _WritelnDecorator
+
+
+class TestDeprecatedPlugin(unittest.TestCase):
+
+    def test_api_present(self):
+        sk = Deprecated()
+        sk.addOptions
+        sk.configure
+        sk.prepareTestResult        
+
+    def test_prepare_patches_result(self):
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, 1)
+        sk = Deprecated()
+        sk.prepareTestResult(res)
+        res._orig_addError
+        res._orig_printErrors
+        res._orig_wasSuccessful
+        res.deprecated
+        self.assertEqual(
+            res.errorClasses,
+            {DeprecatedTest: (res.deprecated, 'DEPRECATED', False)})
+
+        # result w/out print works too
+        res = unittest.TestResult()
+        sk = Deprecated()
+        sk.prepareTestResult(res)
+        res._orig_addError
+        res.deprecated
+        self.assertEqual(
+            res.errorClasses,
+            {DeprecatedTest: (res.deprecated, 'DEPRECATED', False)})
+
+    def test_patched_result_handles_deprecated(self):
+        res = unittest.TestResult()
+        sk = Deprecated()
+        sk.prepareTestResult(res)
+
+        class TC(unittest.TestCase):
+            def test(self):
+                raise DeprecatedTest('deprecated me')
+
+        test = TC('test')
+        test(res)
+        assert not res.errors, "Deprecated was not caught: %s" % res.errors
+        assert res.deprecated
+        assert res.deprecated[0][0] is test
+
+    def test_patches_only_when_needed(self):
+        class NoPatch(unittest.TestResult):
+            def __init__(self):
+                self.errorClasses = {}
+                
+        res = NoPatch()
+        sk = Deprecated()
+        sk.prepareTestResult(res)
+        assert not hasattr(res, '_orig_addError'), \
+               "Deprecated patched a result class it didn't need to patch"
+        
+
+    def test_deprecated_output(self):
+        class TC(unittest.TestCase):
+            def test(self):
+                raise DeprecatedTest('deprecated me')
+
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, 1)
+        sk = Deprecated()
+        sk.prepareTestResult(res)
+
+        test = TC('test')
+        test(res)
+        assert not res.errors, "Deprecated was not caught: %s" % res.errors
+        assert res.deprecated            
+
+        res.printErrors()
+        out = stream.getvalue()
+        assert out
+        assert out.strip() == "D"
+        assert res.wasSuccessful()
+
+    def test_deprecated_output_verbose(self):
+
+        class TC(unittest.TestCase):
+            def test(self):
+                raise DeprecatedTest('deprecated me too')
+        
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, verbosity=2)
+        sk = Deprecated()
+        sk.prepareTestResult(res)
+        test = TC('test')
+        test(res)
+        assert not res.errors, "Deprecated was not caught: %s" % res.errors
+        assert res.deprecated            
+
+        res.printErrors()
+        out = stream.getvalue()
+        print out
+        assert out
+
+        assert ' ... DEPRECATED' in out
+        assert 'deprecated me too' in out
+
+    def test_enabled_by_default(self):
+        sk = Deprecated()
+        assert sk.enabled, "Deprecated was not enabled by default"
+
+    def test_can_be_disabled(self):
+        parser = OptionParser()
+        sk = Deprecated()
+        sk.addOptions(parser)
+        options, args = parser.parse_args(['--no-deprecated'])
+        sk.configure(options, Config())
+        assert not sk.enabled, \
+               "Deprecated was not disabled by noDeprecated option"
+        
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_deprecated_plugin.pyc b/unit_tests/test_deprecated_plugin.pyc
new file mode 100644 (file)
index 0000000..a75385d
Binary files /dev/null and b/unit_tests/test_deprecated_plugin.pyc differ
diff --git a/unit_tests/test_doctest_error_handling$py.class b/unit_tests/test_doctest_error_handling$py.class
new file mode 100644 (file)
index 0000000..722e8a6
Binary files /dev/null and b/unit_tests/test_doctest_error_handling$py.class differ
diff --git a/unit_tests/test_doctest_error_handling.py b/unit_tests/test_doctest_error_handling.py
new file mode 100644 (file)
index 0000000..fcdf388
--- /dev/null
@@ -0,0 +1,40 @@
+import os
+import sys
+import unittest
+from nose.config import Config
+from nose.plugins import doctests
+from mock import Bucket
+
+class TestDoctestErrorHandling(unittest.TestCase):
+
+    def setUp(self):
+        self._path = sys.path[:]
+        here = os.path.dirname(__file__)
+        testdir = os.path.join(here, 'support', 'doctest')
+        sys.path.insert(0, testdir)
+        p = doctests.Doctest()
+        p.can_configure = True
+        p.configure(Bucket(), Config())
+        self.p = p
+        
+    def tearDown(self):
+        sys.path = self._path[:]
+        
+    def test_no_doctests_in_file(self):
+        p = self.p
+        mod = __import__('no_doctests')
+        loaded = [ t for t in p.loadTestsFromModule(mod) ]
+        assert not loaded, "Loaded %s from empty module" % loaded
+
+    def test_err_doctests_raises_exception(self):
+        p = self.p
+        mod = __import__('err_doctests')
+        try:
+            loaded = [ t for t in p.loadTestsFromModule(mod) ]
+        except ValueError:
+            pass
+        else:
+            self.fail("Error doctests file did not raise ValueError")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_doctest_error_handling.pyc b/unit_tests/test_doctest_error_handling.pyc
new file mode 100644 (file)
index 0000000..e43f43c
Binary files /dev/null and b/unit_tests/test_doctest_error_handling.pyc differ
diff --git a/unit_tests/test_doctest_munging.rst b/unit_tests/test_doctest_munging.rst
new file mode 100644 (file)
index 0000000..fdbce64
--- /dev/null
@@ -0,0 +1,105 @@
+doctest output normalization for plugin testing support
+=======================================================
+
+nose.plugins.plugintest.run() is used for testing nose plugins in
+doctests, so it needs to normalise nose output to remove information
+that is not of interest to most plugin tests.
+
+We strip stack trace from formatted exceptions, using a regexp copied
+from ``doctest.py``.  That regexp always matches to the end of a
+string, so we split on blank lines before running the regexp on each
+resulting block.
+
+    >>> from nose.plugins.plugintest import blankline_separated_blocks
+    >>> list(blankline_separated_blocks("spam\neggs\n\nfoo\nbar\n\n"))
+    ['spam\neggs\n\n', 'foo\nbar\n\n']
+    >>> list(blankline_separated_blocks("spam\neggs\n\nfoo\nbar\n"))
+    ['spam\neggs\n\n', 'foo\nbar\n']
+    >>> list(blankline_separated_blocks("spam\neggs\n\nfoo\nbar"))
+    ['spam\neggs\n\n', 'foo\nbar']
+    >>> list(blankline_separated_blocks(""))
+    []
+    >>> list(blankline_separated_blocks("spam"))
+    ['spam']
+
+``remove_stack_traces`` removes the stack traces, replacing them with
+an ellipsis.  Note the first line here is chosen not to be "Traceback
+(most recent...", since doctest would interpret that as meaning that
+the example should raise an exception!
+
+    >>> from nose.plugins.plugintest import remove_stack_traces
+    >>> print remove_stack_traces("""\
+    ... Ceci n'est pas une traceback.
+    ... Traceback (most recent call last):
+    ...   File "/some/dir/foomodule.py", line 15, in runTest
+    ...   File "/some/dir/spam.py", line 293, in who_knows_what
+    ... AssertionError: something bad happened
+    ... """)
+    Ceci n'est pas une traceback.
+    Traceback (most recent call last):
+    ...
+    AssertionError: something bad happened
+    <BLANKLINE>
+
+Multiple tracebacks in an example are all replaced, as long as they're
+separated by blank lines.
+
+    >>> print remove_stack_traces("""\
+    ... Ceci n'est pas une traceback.
+    ... Traceback (most recent call last):
+    ...   File spam
+    ... AttributeError: eggs
+    ...
+    ... Traceback (most recent call last):
+    ...   File eggs
+    ... AttributeError: spam
+    ... """)
+    Ceci n'est pas une traceback.
+    Traceback (most recent call last):
+    ...
+    AttributeError: eggs
+    <BLANKLINE>
+    Traceback (most recent call last):
+    ...
+    AttributeError: spam
+    <BLANKLINE>
+
+
+Putting it together, ``munge_nose_output_for_doctest()`` removes stack
+traces, removes test timings from "Ran n test(s)" output, and strips
+trailing blank lines.
+
+    >>> from nose.plugins.plugintest import munge_nose_output_for_doctest
+    >>> print munge_nose_output_for_doctest("""\
+    ... runTest (foomodule.PassingTest) ... ok
+    ... runTest (foomodule.FailingTest) ... FAIL
+    ...
+    ... ======================================================================
+    ... FAIL: runTest (foomodule.FailingTest)
+    ... ----------------------------------------------------------------------
+    ... Traceback (most recent call last):
+    ...   File "/some/dir/foomodule.py", line 15, in runTest
+    ...   File "/some/dir/spam.py", line 293, in who_knows_what
+    ... AssertionError: something bad happened
+    ...
+    ... ----------------------------------------------------------------------
+    ... Ran 1 test in 0.082s
+    ...
+    ... FAILED (failures=1)
+    ...
+    ...
+    ... """)
+    runTest (foomodule.PassingTest) ... ok
+    runTest (foomodule.FailingTest) ... FAIL
+    <BLANKLINE>
+    ======================================================================
+    FAIL: runTest (foomodule.FailingTest)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    AssertionError: something bad happened
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    Ran 1 test in ...s
+    <BLANKLINE>
+    FAILED (failures=1)
diff --git a/unit_tests/test_id_plugin$py.class b/unit_tests/test_id_plugin$py.class
new file mode 100644 (file)
index 0000000..25407e2
Binary files /dev/null and b/unit_tests/test_id_plugin$py.class differ
diff --git a/unit_tests/test_id_plugin.py b/unit_tests/test_id_plugin.py
new file mode 100644 (file)
index 0000000..d70fc07
--- /dev/null
@@ -0,0 +1,20 @@
+import unittest
+from nose.config import Config
+from nose.plugins.builtin import TestId
+import mock
+
+class TestTestIdPlugin(unittest.TestCase):
+
+    def test_default_id_file_is_in_working_dir(self):
+        tid = TestId()
+        c = Config()
+        opt = mock.Bucket()
+        opt.testIdFile = '.noseids'
+        tid.configure(opt, c)
+        print tid.idfile
+        assert tid.idfile.startswith(c.workingDir), \
+               "%s is not under %s" % (tid.idfile, c.workingDir)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_id_plugin.pyc b/unit_tests/test_id_plugin.pyc
new file mode 100644 (file)
index 0000000..37d4d41
Binary files /dev/null and b/unit_tests/test_id_plugin.pyc differ
diff --git a/unit_tests/test_importer$py.class b/unit_tests/test_importer$py.class
new file mode 100644 (file)
index 0000000..11b3a9c
Binary files /dev/null and b/unit_tests/test_importer$py.class differ
diff --git a/unit_tests/test_importer.py b/unit_tests/test_importer.py
new file mode 100644 (file)
index 0000000..91de8a9
--- /dev/null
@@ -0,0 +1,55 @@
+import os
+import sys
+import unittest
+import nose.config
+import nose.importer
+
+class TestImporter(unittest.TestCase):
+
+    def setUp(self):
+        self.p = sys.path[:]
+
+    def tearDown(self):
+        sys.path = self.p[:]
+    
+    def test_add_paths(self):
+        where = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             'support'))
+        foo = os.path.join(where, 'foo')
+        foobar = os.path.join(foo, 'bar')
+        nose.importer.add_path(foobar)
+        
+        assert not foobar in sys.path
+        assert not foo in sys.path
+        assert where in sys.path
+        assert sys.path[0] == where, "%s first should be %s" % (sys.path, where)
+
+    def test_import(self):
+        where = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             'support'))
+        foo = os.path.join(where, 'foo')
+        foobar = os.path.join(foo, 'bar')
+
+        imp = nose.importer.Importer()
+        mod = imp.importFromDir(foobar, 'buz')
+        assert where in sys.path
+        # buz has an intra-package import that sets boodle
+        assert mod.boodle
+
+    def test_module_no_file(self):
+        where = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             'support'))
+        foo = os.path.join(where, 'foo')
+        foobar = os.path.join(foo, 'bar')
+
+        # something that's not a real module and has no __file__
+        sys.modules['buz'] = 'Whatever'
+
+        imp = nose.importer.Importer()
+        mod = imp.importFromDir(foobar, 'buz')
+        assert where in sys.path
+        # buz has an intra-package import that sets boodle
+        assert mod.boodle
+        
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_importer.pyc b/unit_tests/test_importer.pyc
new file mode 100644 (file)
index 0000000..d6f7d5f
Binary files /dev/null and b/unit_tests/test_importer.pyc differ
diff --git a/unit_tests/test_inspector$py.class b/unit_tests/test_inspector$py.class
new file mode 100644 (file)
index 0000000..17c8deb
Binary files /dev/null and b/unit_tests/test_inspector$py.class differ
diff --git a/unit_tests/test_inspector.py b/unit_tests/test_inspector.py
new file mode 100644 (file)
index 0000000..d5e7542
--- /dev/null
@@ -0,0 +1,149 @@
+import inspect
+import sys
+import textwrap
+import tokenize
+import traceback
+import unittest
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+from nose.inspector import inspect_traceback, Expander, tbsource
+
+class TestExpander(unittest.TestCase):
+
+    def test_simple_inspect_frame(self):
+        src = StringIO('a > 2')
+        lc = { 'a': 2}
+        gb = {}
+        exp = Expander(lc, gb)
+        
+        for tok in tokenize.generate_tokens(src.readline):
+            exp(*tok)
+        # print "'%s'" % exp.expanded_source
+        self.assertEqual(exp.expanded_source.strip(), '2 > 2')
+
+    def test_inspect_traceback_continued(self):
+        a = 6
+        out = ''
+        try:
+            assert a < 1, \
+                "This is a multline expression"
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            out = inspect_traceback(tb)
+            # print "'%s'" % out.strip()
+            self.assertEqual(out.strip(),
+                             '>>  assert 6 < 1, \\\n        '
+                             '"This is a multline expression"')
+
+    def test_get_tb_source_simple(self):
+        # no func frame
+        try:
+            assert False
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            lines, lineno = tbsource(tb, 1)
+            self.assertEqual(''.join(lines).strip(), 'assert False')
+            self.assertEqual(lineno, 0)
+
+    def test_get_tb_source_func(self):        
+        # func frame
+        def check_even(n):
+            print n
+            assert n % 2 == 0
+        try:
+            check_even(1)
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            lines, lineno = tbsource(tb)
+            out = textwrap.dedent(''.join(lines))
+            if sys.version_info < (3,):
+                first_line = '    print n\n'
+            else:
+                first_line = '    print(n)\n'
+            self.assertEqual(out,
+                             first_line +
+                             '    assert n % 2 == 0\n'
+                             'try:\n'
+                             '    check_even(1)\n'
+                             'except AssertionError:\n'
+                             '    et, ev, tb = sys.exc_info()\n'
+                             )
+            self.assertEqual(lineno, 3)
+            
+        # FIXME 2 func frames
+            
+    def test_pick_tb_lines(self):
+        try:
+            val = "fred"
+            def defred(n):
+                return n.replace('fred','')
+            assert defred(val) == 'barney', "Fred - fred != barney?"
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            out = inspect_traceback(tb)
+            # print "'%s'" % out.strip()
+            self.assertEqual(out.strip(),
+                             ">>  assert defred('fred') == 'barney', " 
+                             '"Fred - fred != barney?"')
+        try:
+            val = "fred"
+            def defred(n):
+                return n.replace('fred','')
+            assert defred(val) == 'barney', \
+                "Fred - fred != barney?"
+            def refred(n):
+                return n + 'fred'
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            out = inspect_traceback(tb)
+            #print "'%s'" % out.strip()
+            self.assertEqual(out.strip(),
+                             ">>  assert defred('fred') == 'barney', " 
+                             '\\\n        "Fred - fred != barney?"')
+
+        S = {'setup':1}
+        def check_even(n, nn):
+            assert S['setup']
+            print n, nn
+            assert n % 2 == 0 or nn % 2 == 0
+        try:
+            check_even(1, 3)
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            out = inspect_traceback(tb)
+            print "'%s'" % out.strip()
+            if sys.version_info < (3,):
+                print_line = "    print 1, 3\n"
+            else:
+                print_line = "    print(1, 3)\n"
+            self.assertEqual(out.strip(),
+                             "assert {'setup': 1}['setup']\n" +
+                             print_line +
+                             ">>  assert 1 % 2 == 0 or 3 % 2 == 0")
+            
+    def test_bug_95(self):
+        """Test that inspector can handle multi-line docstrings"""
+        try:
+            """docstring line 1
+            docstring line 2
+            """
+            a = 2
+            assert a == 4
+        except AssertionError:
+            et, ev, tb = sys.exc_info()
+            out = inspect_traceback(tb)
+            print "'%s'" % out.strip()
+            self.assertEqual(out.strip(),
+                             "2 = 2\n"
+                             ">>  assert 2 == 4")
+        
+if __name__ == '__main__':
+    #import logging
+    #logging.basicConfig()
+    #logging.getLogger('').setLevel(10)
+    unittest.main()
+    
diff --git a/unit_tests/test_inspector.pyc b/unit_tests/test_inspector.pyc
new file mode 100644 (file)
index 0000000..08b3b94
Binary files /dev/null and b/unit_tests/test_inspector.pyc differ
diff --git a/unit_tests/test_isolation_plugin$py.class b/unit_tests/test_isolation_plugin$py.class
new file mode 100644 (file)
index 0000000..0e69f8e
Binary files /dev/null and b/unit_tests/test_isolation_plugin$py.class differ
diff --git a/unit_tests/test_isolation_plugin.py b/unit_tests/test_isolation_plugin.py
new file mode 100644 (file)
index 0000000..497fe6d
--- /dev/null
@@ -0,0 +1,2 @@
+def test_lint():
+    import nose.plugins.isolate
diff --git a/unit_tests/test_isolation_plugin.pyc b/unit_tests/test_isolation_plugin.pyc
new file mode 100644 (file)
index 0000000..830ffba
Binary files /dev/null and b/unit_tests/test_isolation_plugin.pyc differ
diff --git a/unit_tests/test_issue155.rst b/unit_tests/test_issue155.rst
new file mode 100644 (file)
index 0000000..450866a
--- /dev/null
@@ -0,0 +1,46 @@
+AttributeError from a method call should not be hidden by exception
+handling intended to ignore the case where the method is not present.
+
+    >>> import sys
+    >>> import unittest
+
+    >>> import nose.case
+    >>> import nose.proxy
+    >>> import nose.result
+    >>> import nose.util
+    >>> import nose.plugins.doctests
+
+    >>> class Result(nose.result.TextTestResult):
+    ...
+    ...     def afterTest(self, test):
+    ...         raise AttributeError("bug in Result")
+    ...
+    ...     def beforeTest(self, test):
+    ...         raise AttributeError("bug in Result")
+
+    >>> class TestCase(unittest.TestCase):
+    ...
+    ...     def address(self):
+    ...         raise AttributeError("bug in TestCase")
+    ...
+    ...     def runTest(self):
+    ...         pass
+
+
+    >>> test = nose.case.Test(TestCase())
+    >>> result = Result(sys.stdout, True, 1)
+    >>> proxy = nose.proxy.ResultProxy(result, test)
+    >>> proxy.beforeTest(test)
+    Traceback (most recent call last):
+    AttributeError: bug in Result
+    >>> proxy.afterTest(test)
+    Traceback (most recent call last):
+    AttributeError: bug in Result
+
+    >>> test.address()
+    Traceback (most recent call last):
+    AttributeError: bug in TestCase
+
+    >>> nose.util.test_address(test)
+    Traceback (most recent call last):
+    AttributeError: bug in TestCase
diff --git a/unit_tests/test_issue270.rst b/unit_tests/test_issue270.rst
new file mode 100644 (file)
index 0000000..4e608a5
--- /dev/null
@@ -0,0 +1,22 @@
+Multiprocess test collection from packages
+------------------------------------------
+
+Tests that the multiprocess plugin correctly collects tests from packages
+
+    >>> import os
+    >>> from nose.plugins.plugintest import run_buffered as run
+    >>> from nose.plugins.multiprocess import MultiProcess
+    >>> support = os.path.join(os.path.dirname(__file__), 'support')
+    >>> issue270 = os.path.join(support, 'issue270')
+
+The test package has a package-level fixture, which causes the entire package
+to be dispatched to a multiprocess worker. Tests are still collected and run
+properly.
+
+    >>> argv = [__file__, '--processes=2', issue270]
+    >>> run(argv=argv, plugins=[MultiProcess()])
+    ..
+    ----------------------------------------------------------------------
+    Ran 2 tests in ...s
+    <BLANKLINE>
+    OK
diff --git a/unit_tests/test_issue270_fixtures$py.class b/unit_tests/test_issue270_fixtures$py.class
new file mode 100644 (file)
index 0000000..6d6f178
Binary files /dev/null and b/unit_tests/test_issue270_fixtures$py.class differ
diff --git a/unit_tests/test_issue270_fixtures.py b/unit_tests/test_issue270_fixtures.py
new file mode 100644 (file)
index 0000000..d18abba
--- /dev/null
@@ -0,0 +1,11 @@
+from nose.plugins.skip import SkipTest
+from nose.plugins.multiprocess import MultiProcess
+
+def setup_module():
+    try:
+        import multiprocessing
+        if 'active' in MultiProcess.status:
+            raise SkipTest("Multiprocess plugin is active. Skipping tests of "
+                           "plugin itself.")
+    except ImportError:
+        raise SkipTest("multiprocessing module not available")
diff --git a/unit_tests/test_issue270_fixtures.pyc b/unit_tests/test_issue270_fixtures.pyc
new file mode 100644 (file)
index 0000000..4653eb1
Binary files /dev/null and b/unit_tests/test_issue270_fixtures.pyc differ
diff --git a/unit_tests/test_issue_006$py.class b/unit_tests/test_issue_006$py.class
new file mode 100644 (file)
index 0000000..9e8d48d
Binary files /dev/null and b/unit_tests/test_issue_006$py.class differ
diff --git a/unit_tests/test_issue_006.py b/unit_tests/test_issue_006.py
new file mode 100644 (file)
index 0000000..d04c174
--- /dev/null
@@ -0,0 +1,31 @@
+import os
+import unittest
+
+class TestIssue006(unittest.TestCase):
+    def test_load_nested_generator(self):
+        from nose.config import Config
+        from nose.loader import TestLoader
+
+        where = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             'support', 'issue006'))
+        l = TestLoader()
+        testmod = iter(l.loadTestsFromName(where)).next()
+        print testmod
+        testmod.setUp()
+
+        testcase = iter(testmod).next()
+        expect = [
+            ['tests.Test1.test_nested_generator'],
+            ['tests.Test1.test_nested_generator_mult(1,)',
+             'tests.Test1.test_nested_generator_mult(2,)',
+             'tests.Test1.test_nested_generator_mult(3,)'],
+            ['tests.Test1.test_normal_generator(1,)',
+             'tests.Test1.test_normal_generator(2,)']
+            ]
+        for test in testcase:
+            tests = map(str, test)
+            print tests
+            self.assertEqual(tests, expect.pop(0))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_issue_006.pyc b/unit_tests/test_issue_006.pyc
new file mode 100644 (file)
index 0000000..f4b17b9
Binary files /dev/null and b/unit_tests/test_issue_006.pyc differ
diff --git a/unit_tests/test_issue_064$py.class b/unit_tests/test_issue_064$py.class
new file mode 100644 (file)
index 0000000..b3d4b70
Binary files /dev/null and b/unit_tests/test_issue_064$py.class differ
diff --git a/unit_tests/test_issue_064.py b/unit_tests/test_issue_064.py
new file mode 100644 (file)
index 0000000..5bf1ca8
--- /dev/null
@@ -0,0 +1,2 @@
+def test_is_generator_alias():
+    from nose.util import is_generator, isgenerator
diff --git a/unit_tests/test_issue_064.pyc b/unit_tests/test_issue_064.pyc
new file mode 100644 (file)
index 0000000..37a9737
Binary files /dev/null and b/unit_tests/test_issue_064.pyc differ
diff --git a/unit_tests/test_issue_065$py.class b/unit_tests/test_issue_065$py.class
new file mode 100644 (file)
index 0000000..0835d5d
Binary files /dev/null and b/unit_tests/test_issue_065$py.class differ
diff --git a/unit_tests/test_issue_065.py b/unit_tests/test_issue_065.py
new file mode 100644 (file)
index 0000000..425f197
--- /dev/null
@@ -0,0 +1,20 @@
+import os
+from nose import loader
+import unittest
+
+support = os.path.join(os.path.dirname(__file__), 'support')
+
+class TestIssue065(unittest.TestCase):
+    def test_dict_wrapper_instance_not_loaded(self):
+        wd = os.path.join(support, 'issue065')
+        l = loader.TestLoader() #workingDir=wd)
+        tests = l.loadTestsFromDir(wd)
+        tests = list(tests)
+        self.assertEqual(len(tests), 1)
+        tests = list(tests[0])
+        assert not tests, "Tests were loaded from module with no tests"
+        
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_issue_065.pyc b/unit_tests/test_issue_065.pyc
new file mode 100644 (file)
index 0000000..ebe392f
Binary files /dev/null and b/unit_tests/test_issue_065.pyc differ
diff --git a/unit_tests/test_issue_100.rst b/unit_tests/test_issue_100.rst
new file mode 100644 (file)
index 0000000..ea5d9d1
--- /dev/null
@@ -0,0 +1,12 @@
+This is a test of the bug reported in issue 100: test.address() fails
+for a case defined in a doctest.
+
+    >>> import unittest
+    >>> import nose.case
+    >>> class SimpleTest(unittest.TestCase):
+    ...
+    ...     def runTest(self):
+    ...         pass
+    >>> test = nose.case.Test(SimpleTest())
+    >>> test.address()
+    (None, '__builtin__', 'SimpleTest.runTest')
diff --git a/unit_tests/test_issue_100.rst.py3.patch b/unit_tests/test_issue_100.rst.py3.patch
new file mode 100644 (file)
index 0000000..58ba2a3
--- /dev/null
@@ -0,0 +1,8 @@
+--- test_issue_100.rst.orig    2010-08-30 19:31:28.000000000 -0700
++++ test_issue_100.rst 2010-08-30 19:31:41.000000000 -0700
+@@ -9,4 +9,4 @@
+     ...         pass
+     >>> test = nose.case.Test(SimpleTest())
+     >>> test.address()
+-    (None, '__builtin__', 'SimpleTest.runTest')
++    (None, 'builtins', 'SimpleTest.runTest')
diff --git a/unit_tests/test_issue_101$py.class b/unit_tests/test_issue_101$py.class
new file mode 100644 (file)
index 0000000..03cb918
Binary files /dev/null and b/unit_tests/test_issue_101$py.class differ
diff --git a/unit_tests/test_issue_101.py b/unit_tests/test_issue_101.py
new file mode 100644 (file)
index 0000000..44918dc
--- /dev/null
@@ -0,0 +1,27 @@
+import sys
+import unittest
+import warnings
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+from nose.exc import SkipTest
+
+class TestErrorClassWithStringException(unittest.TestCase):
+
+    def test_string_exception_not_masked(self):
+        if sys.version_info >= (3,):
+            raise SkipTest("Python 3.x does not support string exceptions")
+
+        class X(Exception):
+            pass
+
+        class EP(ErrorClassPlugin):
+            xes = ErrorClass(X, label='XXX', isfailure=True)
+
+        warnings.filterwarnings(action='ignore', category=DeprecationWarning)
+        try:
+
+            raise "oh no!"
+        except:
+            exc = sys.exc_info()
+        
+        ep = EP()
+        self.assertEqual(ep.addError(None, exc), None)
diff --git a/unit_tests/test_issue_101.pyc b/unit_tests/test_issue_101.pyc
new file mode 100644 (file)
index 0000000..e5ccbc1
Binary files /dev/null and b/unit_tests/test_issue_101.pyc differ
diff --git a/unit_tests/test_issue_159.rst b/unit_tests/test_issue_159.rst
new file mode 100644 (file)
index 0000000..5ab7964
--- /dev/null
@@ -0,0 +1,6 @@
+    >>> from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+    >>> class X(Exception):
+    ...     pass
+    >>> xes = ErrorClass(X, label='XXX')
+    Traceback (most recent call last):
+    TypeError: 'isfailure' is a required named argument for ErrorClass
diff --git a/unit_tests/test_issue_227$py.class b/unit_tests/test_issue_227$py.class
new file mode 100644 (file)
index 0000000..b5e24f6
Binary files /dev/null and b/unit_tests/test_issue_227$py.class differ
diff --git a/unit_tests/test_issue_227.py b/unit_tests/test_issue_227.py
new file mode 100644 (file)
index 0000000..140862e
--- /dev/null
@@ -0,0 +1,12 @@
+# -*- encoding: utf-8 -*-
+from nose.plugins.skip import SkipTest
+import sys
+
+
+def setup():
+    if 'java' in sys.version.lower():
+        raise SkipTest("StringIO() in jython can't handle unicode")
+
+
+def test_unicode():
+    print u'b\u00f6y'
diff --git a/unit_tests/test_issue_227.pyc b/unit_tests/test_issue_227.pyc
new file mode 100644 (file)
index 0000000..6ad2f2f
Binary files /dev/null and b/unit_tests/test_issue_227.pyc differ
diff --git a/unit_tests/test_issue_230$py.class b/unit_tests/test_issue_230$py.class
new file mode 100644 (file)
index 0000000..a7497cd
Binary files /dev/null and b/unit_tests/test_issue_230$py.class differ
diff --git a/unit_tests/test_issue_230.py b/unit_tests/test_issue_230.py
new file mode 100644 (file)
index 0000000..41a717b
--- /dev/null
@@ -0,0 +1,21 @@
+import os
+import unittest
+
+class TestIssue230(unittest.TestCase):
+
+    def test_generator_yield_value(self):
+        from nose.loader import TestLoader
+
+        def test():
+            pass
+        def gen():
+            yield test
+
+        loader = TestLoader()
+        suite = loader.loadTestsFromGenerator(gen, module=None)
+        testcase = iter(suite).next()
+        self.assertEqual(testcase.test.test, test)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_issue_230.pyc b/unit_tests/test_issue_230.pyc
new file mode 100644 (file)
index 0000000..cad18dd
Binary files /dev/null and b/unit_tests/test_issue_230.pyc differ
diff --git a/unit_tests/test_lazy_suite$py.class b/unit_tests/test_lazy_suite$py.class
new file mode 100644 (file)
index 0000000..4d2bbd0
Binary files /dev/null and b/unit_tests/test_lazy_suite$py.class differ
diff --git a/unit_tests/test_lazy_suite.py b/unit_tests/test_lazy_suite.py
new file mode 100644 (file)
index 0000000..79cdcf0
--- /dev/null
@@ -0,0 +1,21 @@
+import unittest
+from nose.suite import LazySuite
+from helpers import iter_compat
+
+def gen():
+    for x in range(0, 10):
+        yield TestLazySuite.TC('test')
+
+class TestLazySuite(unittest.TestCase):
+
+    class TC(unittest.TestCase):
+        def test(self):
+            pass
+        
+    def test_basic_iteration(self):        
+        ls = LazySuite(gen)
+        for t in iter_compat(ls):
+            assert isinstance(t, unittest.TestCase)
+        
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_lazy_suite.pyc b/unit_tests/test_lazy_suite.pyc
new file mode 100644 (file)
index 0000000..0ce78a2
Binary files /dev/null and b/unit_tests/test_lazy_suite.pyc differ
diff --git a/unit_tests/test_loader$py.class b/unit_tests/test_loader$py.class
new file mode 100644 (file)
index 0000000..b3dfad4
Binary files /dev/null and b/unit_tests/test_loader$py.class differ
diff --git a/unit_tests/test_loader.py b/unit_tests/test_loader.py
new file mode 100644 (file)
index 0000000..08f0686
--- /dev/null
@@ -0,0 +1,501 @@
+import imp
+import os
+import sys
+import unittest
+from nose.loader import TestLoader as Loader
+
+from nose import util, loader, selector # so we can set mocks
+import nose.case
+
+
+def safepath(p):
+    """Helper function to make cross-platform safe paths
+    """
+    return p.replace('/', os.sep)
+
+
+def mods():
+    #
+    # Setting up the fake modules that we'll use for testing
+    # test loading
+    #
+    M = {}
+    M['test_module'] = imp.new_module('test_module')
+    M['module'] = imp.new_module('module')
+    M['package'] = imp.new_module('package')
+    M['package'].__path__ = [safepath('/package')]
+    M['package'].__file__ = safepath('/package/__init__.py')
+    M['package.subpackage'] = imp.new_module('package.subpackage')
+    M['package'].subpackage = M['package.subpackage']
+    M['package.subpackage'].__path__ = [safepath('/package/subpackage')]
+    M['package.subpackage'].__file__ = safepath(
+        '/package/subpackage/__init__.py')
+    M['test_module_with_generators'] = imp.new_module(
+        'test_module_with_generators')
+    M['test_module_with_metaclass_tests'] = imp.new_module(
+        'test_module_with_metaclass_tests')
+
+    # a unittest testcase subclass
+    class TC(unittest.TestCase):
+        def runTest(self):
+            pass
+
+    class TC2(unittest.TestCase):
+        def runTest(self):
+            pass
+    
+    # test class that uses a metaclass
+    class TCType(type):
+        def __new__(cls, name, bases, dct):
+            return type.__new__(cls, name, bases, dct)
+    class TestMetaclassed(object):
+        __metaclass__ = TCType
+        def test_one(self):
+            pass
+        def test_two(self):
+            pass
+
+    # test function
+    def test_func():
+        pass
+
+    # non-testcase-subclass test class
+    class TestClass:
+
+        def test_func(self):
+            pass
+
+        def test_generator_inline(self):
+            """docstring for test generator inline
+            """
+            def test_odd(v):
+                assert v % 2
+            for i in range(0, 4):
+                yield test_odd, i
+
+        def test_generator_method(self):
+            """docstring for test generator method
+            """
+            for i in range(0, 4):
+                yield self.try_odd, i
+
+        def test_generator_method_name(self):
+            """docstring for test generator method name
+            """
+            for i in range(0, 4):
+                yield 'try_odd', i
+
+        def try_odd(self, v):
+            assert v % 2
+
+    # test function that is generator
+    def test_func_generator():
+        """docstring for test func generator
+        """
+        def test_odd(v):
+            assert v % 2
+        for i in range(0, 4):
+            yield test_odd, i
+
+    def test_func_generator_name():
+        """docstring for test func generator name
+        """
+        for i in range(0, 4):
+            yield 'try_odd', i
+
+    def try_odd(v):
+        assert v % 2
+
+    M['nose'] = nose
+    M['__main__'] = sys.modules['__main__']
+    M['test_module'].TC = TC
+    TC.__module__ = 'test_module'
+    M['test_module'].test_func = test_func
+    test_func.__module__ = 'test_module'
+    M['module'].TC2 = TC2
+    TC2.__module__ = 'module'
+    M['test_module_with_generators'].TestClass = TestClass
+    TestClass.__module__ = 'test_module_with_generators'
+    M['test_module_with_generators'].test_func_generator = test_func_generator
+    M['test_module_with_generators'].test_func_generator_name = \
+        test_func_generator_name
+    M['test_module_with_generators'].try_odd = try_odd
+    test_func_generator_name.__module__ = 'test_module_with_generators'
+    test_func_generator.__module__ = 'test_module_with_generators'
+    try_odd.__module__ = 'test_module_with_generators'
+    M['test_module_with_metaclass_tests'].TestMetaclassed = TestMetaclassed
+    TestMetaclassed.__module__ = 'test_module_with_metaclass_tests'
+    del TC
+    del TC2
+    del TestMetaclassed
+    # del TCType
+    del test_func
+    del TestClass
+    del test_func_generator
+    return M
+
+M = mods()
+
+# Mock the filesystem access so we don't have to maintain
+# a support dir with real files
+_listdir = os.listdir
+_isdir = os.path.isdir
+_isfile = os.path.isfile
+_exists = os.path.exists
+_import = __import__
+
+
+#
+# Mock functions
+#
+def mock_listdir(path):
+    if path.endswith(safepath('/package')):
+        return ['.', '..', 'subpackage', '__init__.py']
+    elif path.endswith(safepath('/subpackage')):
+        return ['.', '..', '__init__.py']
+    elif path.endswith(safepath('/sort')):
+        return ['.', '..', 'lib', 'src', 'test', 'test_module.py', 'a_test']
+    return ['.', '..', 'test_module.py', 'module.py']
+
+
+def mock_isdir(path):
+    print "is dir '%s'?" % path
+    paths = map(safepath, [
+        '/a/dir/path', '/package',
+        '/package/subpackage', '/sort/lib',
+        '/sort/src', '/sort/a_test',
+        '/sort/test', '/sort'])
+    paths = paths + map(os.path.abspath, paths)
+    if path in paths:
+        return True
+    return False
+
+
+def mock_isfile(path):
+    if path in ('.', '..'):
+        return False
+    return '.' in path
+
+
+def mock_exists(path):
+    print "exists '%s'?" % path
+    paths = map(safepath, [
+        '/package', '/package/__init__.py', '/package/subpackage',
+        '/package/subpackage/__init__.py'
+        ])
+    paths = paths + map(os.path.abspath, paths)
+    return path in paths
+
+
+def mock_import(modname, gl=None, lc=None, fr=None):
+    if gl is None:
+        gl = M
+    if lc is None:
+        lc = locals()
+    try:
+        mod = sys.modules[modname]
+    except KeyError:
+        pass
+    try:
+        pname = []
+        for part in modname.split('.'):
+            pname.append(part)
+            mname = '.'.join(pname)
+            mod = gl[mname]
+            sys.modules[mname] = mod
+        return mod
+    except KeyError:
+        raise ImportError("No '%s' in fake module list" % modname)    
+
+
+class MockImporter:
+    def importFromPath(self, path, fqname):
+        try:
+            m = M[fqname]
+        except KeyError:
+            raise ImportError(fqname)
+        sys.modules[fqname] = m
+        return m
+    
+#
+# Tests
+#
+class TestTestLoader(unittest.TestCase):
+
+    def setUp(self):
+        os.listdir = mock_listdir
+        loader.op_isdir = selector.op_isdir = os.path.isdir = mock_isdir
+        loader.op_isfile = selector.op_isfile = os.path.isfile = mock_isfile
+        selector.op_exists = os.path.exists = mock_exists
+        util.__import__ = mock_import
+        self.l = Loader(importer=MockImporter())#, context=MockContext)
+
+    def tearDown(self):
+        os.listdir = _listdir
+        loader.op_isdir = selector.op_isdir = os.path.isdir = _isdir
+        loader.op_isfile = selector.op_isfile = os.path.isfile = _isfile
+        selector.op_exists = os.path.exists = _exists
+        util.__import__ = _import
+
+    def test_lint(self):
+        """Test that main API functions exist
+        """
+        l = self.l
+        l.loadTestsFromTestCase
+        l.loadTestsFromModule
+        l.loadTestsFromName
+        l.loadTestsFromNames
+
+    def test_load_from_name_dir_abs(self):
+        print "load from name dir"
+        l = self.l
+        suite = l.loadTestsFromName(safepath('/a/dir/path'))
+        tests = [t for t in suite]
+        self.assertEqual(len(tests), 1)
+
+    def test_load_from_name_module_filename(self):
+        print "load from name module filename"
+        l = self.l
+        suite = l.loadTestsFromName('test_module.py')
+        tests = [t for t in suite]
+        assert tests
+
+    def test_load_from_name_module(self):
+        print "load from name module"
+        l = self.l
+        suite = l.loadTestsFromName('test_module')
+        tests = [t for t in suite]
+        assert tests            
+
+    def test_load_from_name_nontest_module(self):
+        print "load from name nontest module"
+        l = self.l
+        suite = l.loadTestsFromName('module')
+        tests = [t for t in suite]
+        assert tests
+
+    def test_load_from_name_method(self):
+        print "load from name method"
+        res = unittest.TestResult()
+        l = self.l
+        suite = l.loadTestsFromName(':TC.runTest')
+        tests = [t for t in suite]
+        assert tests
+        for test in tests:
+            test(res)
+        assert res.errors, \
+               "Expected a ValueError for unresolvable test name, got none"
+
+    def test_load_from_name_module_class(self):
+        print "load from name module class"
+        l = self.l
+        suite = l.loadTestsFromName('test_module:TC')
+        tests = [t for t in suite]
+        print tests
+        assert tests
+        assert len(tests) == 1, \
+               "Should have loaded 1 test, but got %s" % tests
+
+        # the item in tests is a suite, we want to check that all of
+        # the members of the suite are wrapped -- though this is really
+        # a suite test and doesn't belong here..
+        assert filter(lambda t: isinstance(t, nose.case.Test), tests[0])
+
+    def test_load_from_name_module_func(self):
+        print "load from name module func"
+        l = self.l
+        suite = l.loadTestsFromName('test_module:test_func')
+        tests = [t for t in suite]
+        assert tests
+        assert len(tests) == 1, \
+               "Should have loaded 1 test, but got %s" % tests
+        assert isinstance(tests[0].test, nose.case.FunctionTestCase), \
+               "Expected FunctionTestCase not %s" % tests[0].test
+
+    def test_load_from_name_module_method(self):
+        print "load from name module method"
+        l = self.l
+        suite = l.loadTestsFromName('test_module:TC.runTest')
+        tests = [t for t in suite]
+        assert tests
+        assert len(tests) == 1, \
+               "Should have loaded 1 test, but got %s" % tests
+
+    def test_load_from_name_module_missing_class(self):
+        print "load from name module missing class"
+        res = unittest.TestResult()
+        l = self.l
+        suite = l.loadTestsFromName('test_module:TC2')
+        tests = [t for t in suite]
+        assert len(tests) == 1, \
+               "Should have loaded 1 test, but got %s" % tests
+        tests[0](res)
+        assert res.errors, "Expected missing class test to raise exception"
+
+    def test_load_from_name_module_missing_func(self):
+        print "load from name module missing func"
+        res = unittest.TestResult()
+        l = self.l
+        suite = l.loadTestsFromName('test_module:test_func2')
+        tests = [t for t in suite]
+        assert len(tests) == 1, \
+               "Should have loaded 0 test, but got %s" % tests
+        tests[0](res)
+        assert res.errors, "Expected missing func test to raise exception"
+
+    def test_load_from_name_module_missing_method(self):
+        print "load from name module missing method"
+        res = unittest.TestResult()
+        l = self.l
+        suite = l.loadTestsFromName('test_module:TC.testThat')
+        tests = [t for t in suite]
+        assert len(tests) == 1, \
+               "Should have loaded 1 test, but got %s" % tests
+        tests[0](res)
+        assert res.errors, "Expected missing method test to raise exception"
+
+    def test_load_from_name_missing_module(self):
+        print "load from name missing module"
+        res = unittest.TestResult()
+        l = self.l
+        suite = l.loadTestsFromName('other_test_module')
+        tests = [t for t in suite]
+        assert len(tests) == 1, \
+               "Should have loaded 1 test, but got %s" % tests
+        tests[0](res)
+        assert res.errors, "Expected missing module test to raise exception"
+
+    def test_cases_from_testcase_are_wrapped(self):
+        print "cases from testcase are wrapped"
+        test_module = M['test_module']
+        l = self.l
+        suite = l.loadTestsFromTestCase(test_module.TC)
+        print suite
+        tests = [t for t in suite]
+        for test in tests:
+            assert isinstance(test, nose.case.Test), \
+                   "Test %r is not a test wrapper" % test
+
+    def test_load_test_func(self):
+        print "load test func"
+        l = self.l
+        suite = l.loadTestsFromName('test_module')
+        tests = [t for t in suite]
+        self.assertEqual(len(tests), 2, "Wanted 2 tests, got %s" % tests)
+        assert filter(lambda t: isinstance(t, nose.case.Test), tests)
+        print tests
+        class_tests = tests[0]
+        for t in class_tests:
+            print "class test: ", t
+        func_tests = tests[1:]
+        assert class_tests, \
+               "Expected class suite got %s" % class_tests
+        assert len(func_tests) == 1, \
+               "Expected 1 func test got %s" % func_tests
+        for test in class_tests:
+            assert isinstance(test.test, unittest.TestCase), \
+                   "Expected TestCase npt %s" % tests[0].test
+        for test in func_tests:
+            assert isinstance(test.test, nose.case.FunctionTestCase), \
+                   "Expected FunctionTestCase not %s" % tests[1].test
+
+    def test_load_from_name_package_root_path(self):
+        print "load from name package root path"
+        l = self.l
+        suite = l.loadTestsFromName(safepath('/package'))
+        print suite
+        tests = [t for t in suite]
+        assert len(tests) == 1, "Expected one test, got %s" % tests
+        tests = list(tests[0])
+        assert not tests, "The full test list %s was not empty" % tests
+
+    def test_load_from_name_subpackage_safepath(self):
+        print "load from name subpackage path"
+        l = self.l
+        suite = l.loadTestsFromName(safepath('/package/subpackage'))
+        print suite
+        tests = [t for t in suite]
+        assert len(tests) == 0, "Expected no tests, got %s" % tests
+    
+    def test_load_metaclass_customized_classes(self):
+        print "load metaclass-customized classes"
+        test_module_with_generators = M['test_module_with_metaclass_tests']
+        l = self.l
+        suite = l.loadTestsFromModule(test_module_with_generators)
+        tc = [t for t in suite][0]
+        tc_methods = [m for m in tc]
+        self.assertEqual(len(tc_methods), 2)
+
+    def test_load_generators(self):
+        print "load generators"
+        test_module_with_generators = M['test_module_with_generators']
+        l = self.l
+        suite = l.loadTestsFromModule(test_module_with_generators)
+        tests = [t for t in suite]
+
+        for t in tests:
+            print "test", t
+            assert isinstance(t, unittest.TestSuite), \
+                   "Test %s is not a suite" % t
+
+        # the first item is a class, with both normal and generator methods
+        count = 0
+        cl_tests = [t for t in tests[0]]
+        print "class tests", cl_tests
+        normal, gens = cl_tests[0], cl_tests[1:]
+        assert isinstance(normal, nose.case.Test), \
+               "Expected a test case but got %s" % normal
+        for gen in gens:
+            assert isinstance(gen, unittest.TestSuite), \
+                   "Expected a generator test suite, but got %s" % gen
+            count = 0
+            for t in gen:
+                print "generated test %s" % t
+                print t.shortDescription()
+                assert isinstance(t, nose.case.Test), \
+                       "Test %s is not a test?" % t
+                count += 1
+            self.assertEqual(count, 4, "Expected to generate 4 tests, but "
+                             "got %s from %s" % (count, gen))
+            
+        # 2nd item is generated from test_func_generator
+        count = 0
+        for t in tests[1]:
+            print "generated test %s" % t
+            print t.shortDescription()
+            assert isinstance(t, nose.case.Test), \
+                   "Test %s is not a Test?" % t
+            assert isinstance(t.test, nose.case.FunctionTestCase), \
+                   "Test %s is not a FunctionTestCase" % t.test
+            assert 'test_func_generator' in str(t), \
+                   "Bad str val '%s' for test" % str(t)
+            assert 'docstring for test func generator' \
+                   in t.shortDescription(), \
+                   "Bad shortDescription '%s' for test %s" % \
+                   (t.shortDescription(), t)
+            count += 1
+        assert count == 4, \
+               "Expected to generate 4 tests, but got %s" % count
+
+        count = 0
+        for t in tests[2]:
+            print "generated test %s" % t
+            print t.shortDescription()
+            assert isinstance(t, nose.case.Test), \
+                   "Test %s is not a Test?" % t
+            assert isinstance(t.test, nose.case.FunctionTestCase), \
+                   "Test %s is not a FunctionTestCase" % t.test
+            assert 'test_func_generator_name' in str(t), \
+                   "Bad str val '%s' for test" % str(t)
+            assert 'docstring for test func generator name' \
+                   in t.shortDescription(), \
+                   "Bad shortDescription '%s' for test %s" % \
+                   (t.shortDescription(), t)
+            count += 1
+        assert count == 4, \
+               "Expected to generate 4 tests, but got %s" % count
+        
+if __name__ == '__main__':
+    #import logging
+    #logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/unit_tests/test_loader.pyc b/unit_tests/test_loader.pyc
new file mode 100644 (file)
index 0000000..de04a44
Binary files /dev/null and b/unit_tests/test_loader.pyc differ
diff --git a/unit_tests/test_logcapture_plugin$py.class b/unit_tests/test_logcapture_plugin$py.class
new file mode 100644 (file)
index 0000000..a981baa
Binary files /dev/null and b/unit_tests/test_logcapture_plugin$py.class differ
diff --git a/unit_tests/test_logcapture_plugin.py b/unit_tests/test_logcapture_plugin.py
new file mode 100644 (file)
index 0000000..05f667f
--- /dev/null
@@ -0,0 +1,205 @@
+import sys
+from optparse import OptionParser
+from nose.pyversion import UNICODE_STRINGS
+from nose.config import Config
+from nose.plugins.logcapture import LogCapture
+from nose.tools import eq_
+import logging
+from logging import StreamHandler
+import unittest
+
+if sys.version_info >= (2, 7):
+    py27 = True
+else:
+    py27 = False
+
+class TestLogCapturePlugin(object):
+
+    def test_enabled_by_default(self):
+        c = LogCapture()
+        assert c.enabled
+
+    def test_default_options(self):
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser)
+
+        options, args = parser.parse_args(['default_options'])
+        c.configure(options, Config())
+        assert c.enabled
+        eq_(LogCapture.logformat, c.logformat)
+        eq_(LogCapture.clear, c.clear)
+        eq_(LogCapture.filters, c.filters)
+
+    def test_disable_option(self):
+        parser = OptionParser()
+        c = LogCapture()
+        c.addOptions(parser)
+        options, args = parser.parse_args(['test_can_be_disabled_long',
+                                           '--nologcapture'])
+        c.configure(options, Config())
+        assert not c.enabled
+
+        env = {'NOSE_NOLOGCAPTURE': 1}
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['test_can_be_disabled'])
+        c.configure(options, Config())
+        assert not c.enabled
+
+    def test_logging_format_option(self):
+        env = {'NOSE_LOGFORMAT': '++%(message)s++'}
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['logging_format'])
+        c.configure(options, Config())
+        eq_('++%(message)s++', c.logformat)
+
+    def test_logging_datefmt_option(self):
+        env = {'NOSE_LOGDATEFMT': '%H:%M:%S'}
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['logging_datefmt'])
+        c.configure(options, Config())
+        eq_('%H:%M:%S', c.logdatefmt)
+
+    def test_captures_logging(self):
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, {})
+        options, args = parser.parse_args([])
+        c.configure(options, Config())
+        c.start()
+        log = logging.getLogger("foobar.something")
+        log.debug("Hello")
+        c.end()
+        eq_(1, len(c.handler.buffer))
+        eq_("Hello", c.handler.buffer[0].msg)
+
+    def test_clears_all_existing_log_handlers(self):
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, {})
+        options, args = parser.parse_args(['--logging-clear-handlers'])
+        c.configure(options, Config())
+        eq_(c.clear, True)
+
+        def mktest():
+            class TC(unittest.TestCase):
+                def runTest(self):
+                    pass
+            test = TC()
+            return test
+
+        logging.getLogger().addHandler(StreamHandler(sys.stdout))
+        log = logging.getLogger("dummy")
+        log.addHandler(StreamHandler(sys.stdout))
+
+        c.start()
+        c.beforeTest(mktest())
+        c.end()
+
+
+        if py27:
+            expect = ["<class 'nose.plugins.logcapture.MyMemoryHandler'>"]
+        else:
+            expect = ['nose.plugins.logcapture.MyMemoryHandler']
+        eq_([str(c.__class__) for c in logging.getLogger().handlers],
+            expect)
+        eq_([str(c.__class__) for c in logging.getLogger("dummy").handlers],
+            [])
+
+    def test_custom_formatter(self):
+        c = LogCapture()
+        c.logformat = '++%(message)s++'
+        c.start()
+        log = logging.getLogger("foobar.something")
+        log.debug("Hello")
+        c.end()
+        records = c.formatLogRecords()
+        eq_(1, len(records))
+        eq_("++Hello++", records[0])
+
+    def test_logging_filter(self):
+        env = {'NOSE_LOGFILTER': 'foo,bar'}
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['foo'])
+        print options, args
+        c.configure(options, Config())
+        c.start()
+        for name in ['foobar.something', 'foo', 'foo.x', 'abara', 'bar.quux']:
+            log = logging.getLogger(name)
+            log.info("Hello %s" % name)
+        c.end()
+        records = c.formatLogRecords()
+        eq_(3, len(records))
+        assert records[0].startswith('foo:'), records[0]
+        assert records[1].startswith('foo.x:'), records[1]
+        assert records[2].startswith('bar.quux:'), records[2]
+        
+    def test_logging_filter_exclude(self):
+        env = {'NOSE_LOGFILTER': '-foo,-bar'}
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['foo'])
+        print options, args
+        c.configure(options, Config())
+        c.start()
+        for name in ['foobar.something', 'foo', 'foo.x', 'abara', 'bar.quux']:
+            log = logging.getLogger(name)
+            log.info("Hello %s" % name)
+        c.end()
+        records = c.formatLogRecords()
+        eq_(2, len(records))
+        assert records[0].startswith('foobar.something:'), records[0]
+        assert records[1].startswith('abara:'), records[1]
+        
+    def test_logging_filter_exclude_and_include(self):
+        env = {'NOSE_LOGFILTER': 'foo,-foo.bar'}
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, env)
+        options, args = parser.parse_args(['foo'])
+        print options, args
+        c.configure(options, Config())
+        c.start()
+        for name in ['foo.yes', 'foo.bar', 'foo.bar.no', 'blah']:
+            log = logging.getLogger(name)
+            log.info("Hello %s" % name)
+        c.end()
+        records = c.formatLogRecords()
+        eq_(1, len(records))
+        assert records[0].startswith('foo.yes:'), records[0]
+
+    def test_unicode_messages_handled(self):
+        msg = u'Ivan Krsti\u0107'
+        c = LogCapture()
+        parser = OptionParser()
+        c.addOptions(parser, {})
+        options, args = parser.parse_args([])
+        c.configure(options, Config())
+        c.start()
+        log = logging.getLogger("foobar.something")
+        log.debug(msg)
+        log.debug("ordinary string log")
+        c.end()
+
+        class Dummy:
+            pass
+        test = Dummy() 
+        try:
+            raise Exception(msg)
+        except:
+            err = sys.exc_info()
+        (ec, ev, tb) = c.formatError(test, err)
+        print ev
+        if UNICODE_STRINGS:
+            assert msg in ev
+        else:
+            assert msg.encode('utf-8') in ev
diff --git a/unit_tests/test_logcapture_plugin.pyc b/unit_tests/test_logcapture_plugin.pyc
new file mode 100644 (file)
index 0000000..4ce2ef6
Binary files /dev/null and b/unit_tests/test_logcapture_plugin.pyc differ
diff --git a/unit_tests/test_logging$py.class b/unit_tests/test_logging$py.class
new file mode 100644 (file)
index 0000000..c630bb5
Binary files /dev/null and b/unit_tests/test_logging$py.class differ
diff --git a/unit_tests/test_logging.py b/unit_tests/test_logging.py
new file mode 100644 (file)
index 0000000..82386bf
--- /dev/null
@@ -0,0 +1,40 @@
+import logging
+import unittest
+from nose.config import Config
+#from nose.core import configure_logging
+from mock import *
+
+
+class TestLoggingConfig(unittest.TestCase):
+
+    def setUp(self):
+        # install mock root logger so that these tests don't stomp on
+        # the real logging config of the test runner
+        class MockLogger(logging.Logger):
+            root = logging.RootLogger(logging.WARNING)
+            manager = logging.Manager(root)
+        
+        self.real_logger = logging.Logger
+        self.real_root = logging.root
+        logging.Logger = MockLogger
+        logging.root = MockLogger.root
+        
+    def tearDown(self):
+        # reset real root logger
+        logging.Logger = self.real_logger
+        logging.root = self.real_root
+        
+    def test_isolation(self):
+        """root logger settings ignored"""
+
+        root = logging.getLogger('')
+        nose = logging.getLogger('nose')
+
+        config = Config()
+        config.configureLogging()
+        
+        root.setLevel(logging.DEBUG)
+        self.assertEqual(nose.level, logging.WARN)
+    
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_logging.pyc b/unit_tests/test_logging.pyc
new file mode 100644 (file)
index 0000000..fded5da
Binary files /dev/null and b/unit_tests/test_logging.pyc differ
diff --git a/unit_tests/test_ls_tree.rst b/unit_tests/test_ls_tree.rst
new file mode 100644 (file)
index 0000000..260c641
--- /dev/null
@@ -0,0 +1,50 @@
+    >>> import os
+    >>> import tempfile
+    >>> import shutil
+
+    >>> from nose.util import ls_tree
+
+    >>> dir_path = tempfile.mkdtemp()
+
+    >>> def create_file(filename):
+    ...     fd = os.open(filename, os.O_WRONLY|os.O_CREAT, 0666)
+    ...     os.close(fd)
+
+    >>> os.mkdir(os.path.join(dir_path, "top"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir2"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir3"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir/dir"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir/dir2"))
+    >>> os.mkdir(os.path.join(dir_path, "top/.svn"))
+    >>> os.mkdir(os.path.join(dir_path, "top/.notsvn"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir/.svn"))
+    >>> os.mkdir(os.path.join(dir_path, "top/dir/.notsvn"))
+    >>> create_file(os.path.join(dir_path, "top/file"))
+    >>> create_file(os.path.join(dir_path, "top/backup_file~"))
+    >>> create_file(os.path.join(dir_path, "top/file2"))
+    >>> create_file(os.path.join(dir_path, "top/dir/file"))
+    >>> create_file(os.path.join(dir_path, "top/dir/dir/file"))
+    >>> create_file(os.path.join(dir_path, "top/dir/dir/file2"))
+    >>> create_file(os.path.join(dir_path, "top/dir/backup_file~"))
+    >>> create_file(os.path.join(dir_path, "top/dir2/file"))
+
+    Note that files matching skip_pattern (by default SVN files,
+    backup files and compiled Python files) are ignored
+
+    >>> print ls_tree(os.path.join(dir_path, "top"))
+    |-- file
+    |-- file2
+    |-- .notsvn
+    |-- dir
+    |   |-- file
+    |   |-- .notsvn
+    |   |-- dir
+    |   |   |-- file
+    |   |   `-- file2
+    |   `-- dir2
+    |-- dir2
+    |   `-- file
+    `-- dir3
+
+    >>> shutil.rmtree(dir_path)
diff --git a/unit_tests/test_multiprocess$py.class b/unit_tests/test_multiprocess$py.class
new file mode 100644 (file)
index 0000000..891b32c
Binary files /dev/null and b/unit_tests/test_multiprocess$py.class differ
diff --git a/unit_tests/test_multiprocess.py b/unit_tests/test_multiprocess.py
new file mode 100644 (file)
index 0000000..aeb65e4
--- /dev/null
@@ -0,0 +1,62 @@
+import pickle
+import sys
+import unittest
+
+from nose import case
+from nose.plugins import multiprocess
+from nose.plugins.skip import SkipTest
+from nose.config import Config
+from nose.loader import TestLoader
+try:
+    # 2.7+
+    from unittest.runner import _WritelnDecorator
+except ImportError:
+    from unittest import _WritelnDecorator
+
+
+class ArgChecker:
+    def __init__(self, target, args):
+        self.target = target
+        self.args = args
+        # skip the id and queues
+        pargs = args[7:]
+        self.pickled = pickle.dumps(pargs)
+        try:
+            testQueue = args[1]
+            testQueue.get(timeout=0)
+        except:
+            pass # ok if queue is empty
+    def start(self,*args):
+        pass
+    def is_alive(self):
+        return False
+
+        
+def setup(mod):
+    multiprocess._import_mp()
+    if not multiprocess.Process:
+        raise SkipTest("multiprocessing not available")
+    mod.Process = multiprocess.Process
+    multiprocess.Process = ArgChecker
+        
+
+class T(unittest.TestCase):
+    __test__ = False
+    def runTest(self):
+        pass
+
+def test_mp_process_args_pickleable():
+    # TODO(Kumar) this test needs to be more succint.
+    # If you start seeing it timeout then perhaps we need to skip it again.
+    # raise SkipTest('this currently gets stuck in poll() 90% of the time')
+    test = case.Test(T('runTest'))
+    config = Config()
+    config.multiprocess_workers = 2
+    config.multiprocess_timeout = 5
+    runner = multiprocess.MultiProcessTestRunner(
+        stream=_WritelnDecorator(sys.stdout),
+        verbosity=10,
+        loaderClass=TestLoader,
+        config=config)
+    runner.run(test)
+        
diff --git a/unit_tests/test_multiprocess.pyc b/unit_tests/test_multiprocess.pyc
new file mode 100644 (file)
index 0000000..ae83dde
Binary files /dev/null and b/unit_tests/test_multiprocess.pyc differ
diff --git a/unit_tests/test_multiprocess_runner$py.class b/unit_tests/test_multiprocess_runner$py.class
new file mode 100644 (file)
index 0000000..fccea64
Binary files /dev/null and b/unit_tests/test_multiprocess_runner$py.class differ
diff --git a/unit_tests/test_multiprocess_runner.py b/unit_tests/test_multiprocess_runner.py
new file mode 100644 (file)
index 0000000..71ee398
--- /dev/null
@@ -0,0 +1,120 @@
+import unittest
+import imp
+import sys
+from nose.loader import TestLoader
+from nose.plugins import multiprocess
+from nose.suite import ContextSuite
+
+class T_fixt:
+    def setupClass(cls):
+        pass
+    setupClass = classmethod(setupClass)
+
+    def test_a(self):
+        pass
+    def test_b(self):
+        pass
+    
+class T:
+    def test_a(self):
+        pass
+    def test_b(self):
+        pass
+
+
+
+class TestMultiProcessTestRunner(unittest.TestCase):
+
+    def test_next_batch_with_classes(self):
+        r = multiprocess.MultiProcessTestRunner()
+        l = TestLoader()
+        tests = list(r.nextBatch(ContextSuite(
+                    tests=[l.makeTest(T_fixt), l.makeTest(T)])))
+        print tests
+        self.assertEqual(len(tests), 3)
+
+    def test_next_batch_with_module_fixt(self):
+        mod_with_fixt = imp.new_module('mod_with_fixt')
+        sys.modules['mod_with_fixt'] = mod_with_fixt
+
+        def teardown():
+            pass
+
+        class Test(T):
+            pass
+
+        mod_with_fixt.Test = Test
+        mod_with_fixt.teardown = teardown
+        Test.__module__ = 'mod_with_fixt'
+
+        r = multiprocess.MultiProcessTestRunner()
+        l = TestLoader()
+        tests = list(r.nextBatch(l.loadTestsFromModule(mod_with_fixt)))
+        print tests
+        self.assertEqual(len(tests), 1)
+
+    def test_next_batch_with_module(self):
+        mod_no_fixt = imp.new_module('mod_no_fixt')
+        sys.modules['mod_no_fixt'] = mod_no_fixt
+
+        class Test2(T):
+            pass
+
+        class Test_fixt(T_fixt):
+            pass
+
+        mod_no_fixt.Test = Test2
+        Test2.__module__ = 'mod_no_fixt'
+        mod_no_fixt.Test_fixt = Test_fixt
+        Test_fixt.__module__ = 'mod_no_fixt'
+
+        r = multiprocess.MultiProcessTestRunner()
+        l = TestLoader()
+        tests = list(r.nextBatch(l.loadTestsFromModule(mod_no_fixt)))
+        print tests
+        self.assertEqual(len(tests), 3)
+
+    def test_next_batch_with_generator_method(self):
+        class Tg:
+            def test_gen(self):
+                for i in range(0, 3):
+                    yield self.check, i
+            def check(self, val):
+                pass
+        r = multiprocess.MultiProcessTestRunner()
+        l = TestLoader()
+        tests = list(r.nextBatch(l.makeTest(Tg)))
+        print tests
+        print [r.address(t) for t in tests]
+        self.assertEqual(len(tests), 1)
+
+    def test_next_batch_can_split_set(self):
+
+        mod_with_fixt2 = imp.new_module('mod_with_fixt2')
+        sys.modules['mod_with_fixt2'] = mod_with_fixt2
+
+        def setup():
+            pass
+
+        class Test(T):
+            pass
+
+        class Test_fixt(T_fixt):
+            pass
+
+        mod_with_fixt2.Test = Test
+        mod_with_fixt2.Test_fixt = Test_fixt
+        mod_with_fixt2.setup = setup
+        mod_with_fixt2._multiprocess_can_split_ = True
+        Test.__module__ = 'mod_with_fixt2'
+        Test_fixt.__module__ = 'mod_with_fixt2'
+
+        r = multiprocess.MultiProcessTestRunner()
+        l = TestLoader()
+        tests = list(r.nextBatch(l.loadTestsFromModule(mod_with_fixt2)))
+        print tests
+        self.assertEqual(len(tests), 3)
+        
+            
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_multiprocess_runner.pyc b/unit_tests/test_multiprocess_runner.pyc
new file mode 100644 (file)
index 0000000..de2c861
Binary files /dev/null and b/unit_tests/test_multiprocess_runner.pyc differ
diff --git a/unit_tests/test_pdb_plugin$py.class b/unit_tests/test_pdb_plugin$py.class
new file mode 100644 (file)
index 0000000..2bab4b9
Binary files /dev/null and b/unit_tests/test_pdb_plugin$py.class differ
diff --git a/unit_tests/test_pdb_plugin.py b/unit_tests/test_pdb_plugin.py
new file mode 100644 (file)
index 0000000..cdd43f2
--- /dev/null
@@ -0,0 +1,117 @@
+import sys
+import unittest
+from nose.config import Config
+from nose.plugins import debug
+from optparse import OptionParser
+from StringIO import StringIO
+
+class StubPdb:
+    called = False
+    def post_mortem(self, tb):
+        self.called = True
+
+class TestPdbPlugin(unittest.TestCase):
+
+    def setUp(self):
+        self._pdb = debug.pdb
+        self._so = sys.stdout
+        debug.pdb = StubPdb()
+
+    def tearDown(self):
+        debug.pdb = self._pdb
+        sys.stdout = self._so
+
+    def test_plugin_api(self):
+        p = debug.Pdb()
+        p.addOptions
+        p.configure
+        p.addError
+        p.addFailure
+
+    def test_plugin_calls_pdb(self):
+        p = debug.Pdb()
+
+        try:
+            raise Exception("oops")
+        except:
+            err = sys.exc_info()
+    
+        p.enabled = True
+        p.enabled_for_errors = True
+        p.enabled_for_failures = True
+
+        p.addError(None, err)
+        assert debug.pdb.called, "Did not call pdb.post_mortem on error"
+
+        debug.pdb.called = False
+        p.addFailure(None, err)
+        assert debug.pdb.called, "Did not call pdb.post_mortem on failure"
+
+    def test_command_line_options_enable(self):
+        parser = OptionParser()
+
+        p = debug.Pdb()
+        p.addOptions(parser)
+        options, args = parser.parse_args(['test_configuration',
+                                           '--pdb',
+                                           '--pdb-failures'])
+        p.configure(options, Config())
+        assert p.enabled
+        assert p.enabled_for_errors
+        assert p.enabled_for_failures
+
+    def test_disabled_by_default(self):
+        p = debug.Pdb()
+        assert not p.enabled
+        assert not p.enabled_for_failures
+
+        parser = OptionParser()
+        p.addOptions(parser)
+        options, args = parser.parse_args(['test_configuration'])
+        p.configure(options, Config())
+        assert not p.enabled
+        assert not p.enabled_for_errors
+        assert not p.enabled_for_failures
+        
+    def test_env_settings_enable(self):
+        p = debug.Pdb()
+        assert not p.enabled
+        assert not p.enabled_for_failures
+
+        env = {'NOSE_PDB': '1',
+               'NOSE_PDB_FAILURES': '1'}
+
+        parser = OptionParser()
+        p.addOptions(parser, env)
+        options, args = parser.parse_args(['test_configuration'])
+        p.configure(options, Config())
+        assert p.enabled
+        assert p.enabled_for_errors
+        assert p.enabled_for_failures
+
+    def test_real_stdout_restored_before_call(self):
+        
+        class CheckStdout(StubPdb):
+            def post_mortem(self, tb):
+                assert sys.stdout is sys.__stdout__, \
+                       "sys.stdout was not restored to sys.__stdout__ " \
+                       "before call"
+        debug.pdb = CheckStdout()
+
+        patch = StringIO()
+        sys.stdout = patch
+        p = debug.Pdb()
+        p.enabled = True
+        p.enabled_for_errors = True
+
+        try:
+            raise Exception("oops")
+        except:
+            err = sys.exc_info()
+    
+        p.addError(None, err)    
+        assert sys.stdout is patch, "sys.stdout was not reset after call"
+        
+        
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_pdb_plugin.pyc b/unit_tests/test_pdb_plugin.pyc
new file mode 100644 (file)
index 0000000..e22cd19
Binary files /dev/null and b/unit_tests/test_pdb_plugin.pyc differ
diff --git a/unit_tests/test_plugin$py.class b/unit_tests/test_plugin$py.class
new file mode 100644 (file)
index 0000000..26b0b74
Binary files /dev/null and b/unit_tests/test_plugin$py.class differ
diff --git a/unit_tests/test_plugin.py b/unit_tests/test_plugin.py
new file mode 100644 (file)
index 0000000..aa2d9cc
--- /dev/null
@@ -0,0 +1,33 @@
+import optparse
+import unittest
+
+import nose.plugins
+
+
+class OptionProcessingTests(unittest.TestCase):
+
+    def test_enable_plugin(self):
+        class NamedPlugin(nose.plugins.Plugin):
+            name = "jim-bob"
+        def parse_options(env, args_in):
+            plugin = NamedPlugin()
+            parser = optparse.OptionParser()
+            plugin.options(parser, env)
+            options, args = parser.parse_args(args_in)
+            return options
+        options = parse_options({}, [])
+        assert not options.enable_plugin_jim_bob, \
+               "Plugin should not be enabled"
+        options = parse_options({"NOSE_WITH_JIM_BOB": "1"}, [])
+        assert options.enable_plugin_jim_bob, \
+               "Plugin should be enabled"
+        options = parse_options({}, ["--with-jim-bob"])
+        assert options.enable_plugin_jim_bob, \
+               "Plugin should be enabled"
+        options = parse_options({"NOSE_WITH_JIM_BOB": "1"}, ["--with-jim-bob"])
+        assert options.enable_plugin_jim_bob, \
+               "Plugin should be enabled"
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_plugin.pyc b/unit_tests/test_plugin.pyc
new file mode 100644 (file)
index 0000000..7af0d99
Binary files /dev/null and b/unit_tests/test_plugin.pyc differ
diff --git a/unit_tests/test_plugin_interfaces$py.class b/unit_tests/test_plugin_interfaces$py.class
new file mode 100644 (file)
index 0000000..25eb3bb
Binary files /dev/null and b/unit_tests/test_plugin_interfaces$py.class differ
diff --git a/unit_tests/test_plugin_interfaces.py b/unit_tests/test_plugin_interfaces.py
new file mode 100644 (file)
index 0000000..499af6b
--- /dev/null
@@ -0,0 +1,45 @@
+import unittest
+from nose.plugins.base import IPluginInterface
+
+class TestPluginInterfaces(unittest.TestCase):
+
+    def test_api_methods_present(self):
+
+        from nose.loader import TestLoader
+        from nose.selector import Selector
+
+        
+        exclude = [ 'loadTestsFromGenerator',
+                    'loadTestsFromGeneratorMethod'
+                    ]
+        
+        selfuncs = [ f for f in dir(Selector)
+                     if f.startswith('want') ]
+        loadfuncs = [ f for f in dir(TestLoader)
+                      if f.startswith('load') and not f in exclude ]
+        
+        others = ['addDeprecated', 'addError', 'addFailure',
+                  'addSkip', 'addSuccess', 'startTest', 'stopTest',
+                  'prepareTest', 'begin', 'report'
+                  ] 
+
+        expect = selfuncs + loadfuncs + others
+        
+        pd = dir(IPluginInterface)
+        
+        for f in expect:
+            assert f in pd, "No %s in IPluginInterface" % f
+            assert getattr(IPluginInterface, f).__doc__, \
+                "No docs for %f in IPluginInterface" % f
+            
+    def test_no_instantiate(self):
+        try:
+            p = IPluginInterface()
+        except TypeError:
+            pass
+        else:
+            assert False, \
+                "Should not be able to instantiate IPluginInterface"
+            
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_plugin_interfaces.pyc b/unit_tests/test_plugin_interfaces.pyc
new file mode 100644 (file)
index 0000000..c4d31b1
Binary files /dev/null and b/unit_tests/test_plugin_interfaces.pyc differ
diff --git a/unit_tests/test_plugin_manager$py.class b/unit_tests/test_plugin_manager$py.class
new file mode 100644 (file)
index 0000000..1332395
Binary files /dev/null and b/unit_tests/test_plugin_manager$py.class differ
diff --git a/unit_tests/test_plugin_manager.py b/unit_tests/test_plugin_manager.py
new file mode 100644 (file)
index 0000000..578ce03
--- /dev/null
@@ -0,0 +1,74 @@
+import unittest
+from nose import case
+from nose.plugins import Plugin, PluginManager
+
+
+class Plug(Plugin):
+    def loadTestsFromFile(self, path):
+        class TC(unittest.TestCase):
+            def test(self):
+                pass
+        return [TC('test')]
+    def addError(self, test, err):
+        return True
+
+class Plug2(Plugin):
+    def loadTestsFromFile(self, path):
+        class TCT(unittest.TestCase):
+            def test_2(self):
+                pass
+        return [TCT('test_2')]
+    def addError(self, test, err):
+        assert False, "Should not have been called"
+
+class Plug3(Plugin):
+    def loadTestsFromModule(self, module):
+        raise TypeError("I don't like to type")
+
+class Plug4(Plugin):
+    def loadTestsFromModule(self, module):
+        raise AttributeError("I am missing my nose")
+
+class BetterPlug2(Plugin):
+    name = 'plug2'
+
+
+class TestPluginManager(unittest.TestCase):
+
+    def test_proxy_to_plugins(self):
+        man = PluginManager(plugins=[Plug(), Plug2()])
+
+        # simple proxy: first plugin to return a value wins
+        self.assertEqual(man.addError(None, None), True)
+
+        # multiple proxy: all plugins that return values get to run
+        all = []
+        for res in man.loadTestsFromFile('foo'):
+            print res
+            all.append(res)
+        self.assertEqual(len(all), 2)
+
+    def test_iter(self):
+        expect = [Plug(), Plug2()]
+        man = PluginManager(plugins=expect)
+        for plug in man:
+            self.assertEqual(plug, expect.pop(0))
+        assert not expect, \
+               "Some plugins were not found by iteration: %s" % expect
+
+    def test_plugin_generative_method_errors_not_hidden(self):
+        import nose.failure
+        pm = PluginManager(plugins=[Plug3(), Plug4()])
+        loaded = list(pm.loadTestsFromModule('whatever'))
+        self.assertEqual(len(loaded), 2)
+        for test in loaded:
+            assert isinstance(test, nose.failure.Failure), \
+            "%s is not a failure" % test
+
+    def test_plugin_override(self):
+        pm = PluginManager(plugins=[Plug2(), BetterPlug2()])
+        self.assertEqual(len(pm.plugins), 1)
+        assert isinstance(pm.plugins[0], BetterPlug2)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_plugin_manager.pyc b/unit_tests/test_plugin_manager.pyc
new file mode 100644 (file)
index 0000000..4ed9183
Binary files /dev/null and b/unit_tests/test_plugin_manager.pyc differ
diff --git a/unit_tests/test_plugins$py.class b/unit_tests/test_plugins$py.class
new file mode 100644 (file)
index 0000000..1eac912
Binary files /dev/null and b/unit_tests/test_plugins$py.class differ
diff --git a/unit_tests/test_plugins.py b/unit_tests/test_plugins.py
new file mode 100644 (file)
index 0000000..95c3e61
--- /dev/null
@@ -0,0 +1,415 @@
+import logging
+import os
+import sys
+import unittest
+import nose.plugins
+from optparse import OptionParser
+import tempfile
+from warnings import warn, filterwarnings, resetwarnings
+
+from nose import SkipTest
+from nose.pyversion import unbound_method
+from nose.config import Config
+from nose.plugins.attrib import AttributeSelector
+from nose.plugins.base import Plugin
+from nose.plugins.cover import Coverage
+from nose.plugins.doctests import Doctest
+from nose.plugins.prof import Profile
+
+from mock import *
+
+class P(Plugin):
+    """Plugin of destiny!"""    
+    pass
+
+class ErrPlugin(object):
+    def load(self):
+        raise Exception("Failed to load the plugin")
+    
+class ErrPkgResources(object):
+    def iter_entry_points(self, ep):
+        yield ErrPlugin()
+
+        
+# some plugins have 2.4-only features
+compat_24 = sys.version_info >= (2, 4)
+
+
+class TestBuiltinPlugins(unittest.TestCase):
+
+    def setUp(self):
+        self.p = sys.path[:]
+
+    def tearDown(self):
+        sys.path = self.p[:]
+                
+    def test_add_options(self):
+        conf = Config()
+        opt = Bucket()
+        parser = MockOptParser()
+        plug = P()
+
+        plug.add_options(parser)
+        o, d = parser.opts[0]
+        # print d
+        assert o[0] == '--with-p'
+        assert d['action'] == 'store_true'
+        assert not d['default']
+        assert d['dest'] == 'enable_plugin_p'
+        assert d['help'] == 'Enable plugin P: Plugin of destiny! [NOSE_WITH_P]'
+
+        opt.enable_plugin_p = True
+        plug.configure(opt, conf)
+        assert plug.enabled
+
+        
+class TestDoctestPlugin(unittest.TestCase):
+
+    def setUp(self):
+        self.p = sys.path[:]
+
+    def tearDown(self):
+        sys.path = self.p[:]
+    
+    def test_add_options(self):
+        # doctest plugin adds some options...
+        conf = Config()
+        opt = Bucket()
+        parser = MockOptParser()
+        plug = Doctest()
+        
+        plug.add_options(parser, {})
+        o, d = parser.opts[0]
+        assert o[0] == '--with-doctest'
+
+        o2, d2 = parser.opts[1]
+        assert o2[0] == '--doctest-tests'
+        
+        o3, d3 = parser.opts[2]
+        assert o3[0] == '--doctest-extension'
+
+    def test_config(self):
+        # test that configuration works properly when both environment
+        # and command line specify a doctest extension
+        parser = OptionParser()
+        env = {'NOSE_DOCTEST_EXTENSION':'ext'}
+        argv = ['--doctest-extension', 'txt']
+        dtp = Doctest()
+        dtp.add_options(parser, env)
+        options, args = parser.parse_args(argv)
+        
+        print options
+        print args
+        self.assertEqual(options.doctestExtension, ['ext', 'txt'])
+
+        env = {}
+        parser = OptionParser()
+        dtp.add_options(parser, env)
+        options, args = parser.parse_args(argv)
+        print options
+        print args
+        self.assertEqual(options.doctestExtension, ['txt'])
+            
+    def test_want_file(self):
+        # doctest plugin can select module and/or non-module files
+        conf = Config()
+        opt = Bucket()
+        plug = Doctest()
+        plug.can_configure = True
+        plug.configure(opt, conf)
+        
+        assert plug.wantFile('foo.py')
+        assert not plug.wantFile('bar.txt')
+        assert not plug.wantFile('buz.rst')
+        assert not plug.wantFile('bing.mov')
+        
+        plug.extension = ['.txt', '.rst']
+        assert plug.wantFile('/path/to/foo.py')
+        assert plug.wantFile('/path/to/bar.txt')
+        assert plug.wantFile('/path/to/buz.rst')
+        assert not plug.wantFile('/path/to/bing.mov')
+        
+    def test_matches(self):
+        # doctest plugin wants tests from all NON-test modules
+        conf = Config()
+        opt = Bucket()
+        plug = Doctest()
+        plug.can_configure = True
+        plug.configure(opt, conf)
+        assert not plug.matches('test')
+        assert plug.matches('foo')
+
+    def test_collect_pymodule(self):
+        here = os.path.dirname(__file__)
+        support = os.path.join(here, 'support')
+        if not support in sys.path:
+            sys.path.insert(0, support)
+        import foo.bar.buz
+        
+        conf = Config()
+        opt = Bucket()
+        plug = Doctest()
+        plug.can_configure = True
+        plug.configure(opt, conf)
+        suite = plug.loadTestsFromModule(foo.bar.buz)        
+        expect = ['[afunc (foo.bar.buz)]']
+        for test in suite:
+            self.assertEqual(str(test), expect.pop(0))
+
+    def test_addresses(self):
+        here = os.path.dirname(__file__)
+        support = os.path.join(here, 'support')
+        if not support in sys.path:
+            sys.path.insert(0, support)
+        import foo.bar.buz
+        
+        conf = Config()
+        opt = Bucket()
+        plug = Doctest()
+        plug.can_configure = True
+        plug.configure(opt, conf)
+        suite = plug.loadTestsFromModule(foo.bar.buz)
+        for test in suite:
+            print test.address()
+            file, mod, call = test.address()
+            self.assertEqual(mod, 'foo.bar.buz')
+            self.assertEqual(call, None)
+            for case in test:
+                print case.address()
+                file, mod, call = case.address()
+                self.assertEqual(mod, 'foo.bar.buz')
+                self.assertEqual(call, 'afunc')
+            
+    def test_collect_txtfile(self):
+        here = os.path.abspath(os.path.dirname(__file__))
+        support = os.path.join(here, 'support')
+        fn = os.path.join(support, 'foo', 'doctests.txt')
+        
+        conf = Config()        
+        opt = Bucket()
+        plug = Doctest()
+        plug.can_configure = True
+        plug.configure(opt, conf)
+        plug.extension = ['.txt']
+        suite = plug.loadTestsFromFile(fn)
+        for test in suite:
+            assert str(test).endswith('doctests.txt')
+            assert test.address(), "Test %s has no address"
+        
+    def test_collect_no_collect(self):
+        # bug http://nose.python-hosting.com/ticket/55 
+        # we got "iteration over non-sequence" when no files match
+        here = os.path.abspath(os.path.dirname(__file__))
+        support = os.path.join(here, 'support')
+        plug = Doctest()
+        for test in plug.loadTestsFromFile(os.path.join(support, 'foo')):
+            self.fail("Expected no tests, got %s" % test)
+
+
+class TestAttribPlugin(unittest.TestCase):
+
+    def test_add_options(self):
+        plug = AttributeSelector()
+        parser = MockOptParser()
+        plug.add_options(parser)
+
+        expect = [(('-a', '--attr'),
+                   {'dest': 'attr', 'action': 'append', 'default': None,
+                    'metavar': 'ATTR',
+                    'help': 'Run only tests that have attributes '
+                    'specified by ATTR [NOSE_ATTR]'})]
+
+        if compat_24:
+            expect.append(
+                (('-A', '--eval-attr'),
+                 {'dest': 'eval_attr', 'action': 'append',
+                  'default': None, 'metavar': 'EXPR',
+                  'help': 'Run only tests for whose attributes the '
+                  'Python expression EXPR evaluates to True '
+                  '[NOSE_EVAL_ATTR]'}))
+        self.assertEqual(parser.opts, expect)
+
+        opt = Bucket()
+        opt.attr = ['!slow']
+        plug.configure(opt, Config())
+        assert plug.enabled
+        self.assertEqual(plug.attribs, [[('slow', False)]])
+
+        opt.attr = ['fast,quick', 'weird=66']
+        plug.configure(opt, Config())
+        self.assertEqual(plug.attribs, [[('fast', True),
+                                         ('quick', True)],
+                                        [('weird', '66')]])
+
+        # don't die on trailing ,
+        opt.attr = [ 'something,' ]
+        plug.configure(opt, Config())
+        self.assertEqual(plug.attribs, [[('something', True)]] )
+        
+        if compat_24:
+            opt.attr = None
+            opt.eval_attr = [ 'weird >= 66' ]
+            plug.configure(opt, Config())
+            self.assertEqual(plug.attribs[0][0][0], 'weird >= 66')
+            assert callable(plug.attribs[0][0][1])
+                       
+    def test_basic_attr(self):
+        def f():
+            pass
+        f.a = 1
+
+        def g():
+            pass
+    
+        plug = AttributeSelector()
+        plug.attribs = [[('a', True)]]
+        assert plug.wantFunction(f) is not False
+        assert not plug.wantFunction(g)
+
+    def test_class_attr(self):
+        class TestP:
+            foo = True
+            def h():
+                pass
+
+        def i():
+            pass
+        
+        plug = AttributeSelector()
+        plug.attribs = [[('foo', True)]]
+        assert plug.wantMethod(unbound_method(TestP, TestP.h)) is not False
+        assert plug.wantFunction(i) is False
+        
+    def test_eval_attr(self):
+        if not compat_24:
+            warn("No support for eval attributes in python versions older"
+                 " than 2.4")
+            return
+        def f():
+            pass
+        f.monkey = 2
+        
+        def g():
+            pass
+        g.monkey = 6
+
+        def h():
+            pass
+        h.monkey = 5
+        
+        cnf = Config()
+        opt = Bucket()
+        opt.eval_attr = "monkey > 5"
+        plug = AttributeSelector()
+        plug.configure(opt, cnf)
+
+        assert not plug.wantFunction(f)
+        assert plug.wantFunction(g) is not False
+        assert not plug.wantFunction(h)
+
+    def test_attr_a_b(self):
+        def f1():
+            pass
+        f1.tags = ['a', 'b']
+
+        def f2():
+            pass
+        f2.tags = ['a', 'c']
+
+        def f3():
+            pass
+        f3.tags = ['b', 'c']
+
+        def f4():
+            pass
+        f4.tags = ['c', 'd']
+        
+        cnf = Config()
+        parser = OptionParser()
+        plug = AttributeSelector()
+
+        plug.add_options(parser)
+
+        # OR
+        opt, args = parser.parse_args(['test', '-a', 'tags=a',
+                                       '-a', 'tags=b'])
+        print opt
+        plug.configure(opt, cnf)
+
+        assert plug.wantFunction(f1) is None
+        assert plug.wantFunction(f2) is None
+        assert plug.wantFunction(f3) is None
+        assert not plug.wantFunction(f4)
+
+        # AND
+        opt, args = parser.parse_args(['test', '-a', 'tags=a,tags=b'])
+        print opt
+        plug.configure(opt, cnf)
+
+        assert plug.wantFunction(f1) is None
+        assert not plug.wantFunction(f2)
+        assert not plug.wantFunction(f3)
+        assert not plug.wantFunction(f4)
+        
+
+class TestProfPlugin(unittest.TestCase):
+
+    def setUp(self):        
+        if not Profile.available():
+            raise SkipTest('profile plugin not available; skipping')
+
+    def test_options(self):
+        parser = OptionParser()
+        conf = Config()
+        plug = Profile()
+
+        plug.add_options(parser, {})
+        opts = [ o._long_opts[0] for o in parser.option_list ]
+        assert '--profile-sort' in opts
+        assert '--profile-stats-file' in opts
+        assert '--with-profile' in opts
+        assert '--profile-restrict' in opts
+
+    def test_begin(self):
+        plug = Profile()
+        plug.pfile = tempfile.mkstemp()[1]
+        try:
+            plug.begin()
+            assert plug.prof
+        finally:
+            plug.finalize(None)
+
+    def test_prepare_test(self):
+        r = {}
+        class dummy:
+            def runcall(self, f, r):
+                r[1] = f(), "wrapped"
+        def func():
+            return "func"
+        
+        plug = Profile()
+        plug.prof = dummy()
+        result = plug.prepareTest(func)
+        try:
+            result(r)
+            assert r[1] == ("func", "wrapped")
+        finally:
+            plug.finalize(None)
+
+    def test_finalize(self):
+        def func():
+            pass
+
+        plug = Profile()
+        plug.begin()
+        plug.prepareTest(func)
+        pfile = plug.pfile
+        try:
+            assert os.path.exists(pfile)
+        finally:
+            plug.finalize(None)
+        assert not os.path.exists(pfile), \
+               "finalize did not remove temp file %s" % pfile
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_plugins.pyc b/unit_tests/test_plugins.pyc
new file mode 100644 (file)
index 0000000..e786ef6
Binary files /dev/null and b/unit_tests/test_plugins.pyc differ
diff --git a/unit_tests/test_result_proxy$py.class b/unit_tests/test_result_proxy$py.class
new file mode 100644 (file)
index 0000000..7f6a312
Binary files /dev/null and b/unit_tests/test_result_proxy$py.class differ
diff --git a/unit_tests/test_result_proxy.py b/unit_tests/test_result_proxy.py
new file mode 100644 (file)
index 0000000..3d6e2ac
--- /dev/null
@@ -0,0 +1,188 @@
+import sys
+import unittest
+from inspect import ismethod
+from nose.config import Config
+from nose.proxy import ResultProxyFactory, ResultProxy
+from mock import RecordingPluginManager
+
+class TestResultProxy(unittest.TestCase):
+
+    def test_proxy_has_basic_methods(self):
+        res = unittest.TestResult()
+        proxy = ResultProxy(res, test=None)
+
+        methods = [ 'addError', 'addFailure', 'addSuccess',
+                    'startTest', 'stopTest', 'stop' ]
+        for method in methods:
+            m = getattr(proxy, method)
+            assert ismethod(m), "%s is not a method" % method
+            
+    def test_proxy_has_nose_methods(self):
+        res = unittest.TestResult()
+        proxy = ResultProxy(res, test=None)
+
+        methods = [ 'beforeTest', 'afterTest' ]
+        for method in methods:
+            m = getattr(proxy, method)
+            assert ismethod(m), "%s is not a method" % method
+
+    def test_proxy_proxies(self):
+        from nose.case import Test
+        class Dummy:
+            def __init__(self):
+                self.__dict__['called'] = []
+            def __getattr__(self, attr):
+                c = self.__dict__['called']
+                c.append(attr)
+                def dummy(*arg, **kw):
+                    pass
+                return dummy
+        class TC(unittest.TestCase):
+            def runTest(self):
+                pass
+        try:
+            raise Exception("exception")
+        except:
+            err = sys.exc_info()
+        test = TC()
+        case = Test(test)
+        res = Dummy()
+        proxy = ResultProxy(res, test=case)
+        proxy.addError(test, err)
+        proxy.addFailure(test, err)
+        proxy.addSuccess(test)
+        proxy.startTest(test)
+        proxy.stopTest(test)
+        proxy.beforeTest(test)
+        proxy.afterTest(test)
+        proxy.stop()
+        proxy.shouldStop = 'yes please'
+        for method in ['addError', 'addFailure', 'addSuccess',
+                       'startTest', 'stopTest', 'beforeTest', 'afterTest',
+                       'stop']:
+            assert method in res.called, "%s was not proxied" % method
+        self.assertEqual(res.shouldStop, 'yes please')
+
+    def test_attributes_are_proxied(self):
+        res = unittest.TestResult()
+        proxy = ResultProxy(res, test=None)
+        proxy.errors
+        proxy.failures
+        proxy.shouldStop
+        proxy.testsRun
+
+    def test_test_cases_can_access_result_attributes(self):
+        from nose.case import Test
+        class TC(unittest.TestCase):
+            def run(self, result):
+                unittest.TestCase.run(self, result)
+                print "errors", result.errors
+                print "failures", result.failures
+            def runTest(self):
+                pass
+        test = TC()
+        case = Test(test)
+        res = unittest.TestResult()
+        proxy = ResultProxy(res, test=case)
+        case(proxy)
+
+    def test_proxy_handles_missing_methods(self):
+        from nose.case import Test
+        class TC(unittest.TestCase):
+            def runTest(self):
+                pass
+        test = TC()
+        case = Test(test)
+        res = unittest.TestResult()
+        proxy = ResultProxy(res, case)
+        proxy.beforeTest(test)
+        proxy.afterTest(test)
+        
+    def test_proxy_calls_plugins(self):
+        from nose.case import Test
+        res = unittest.TestResult()
+        class TC(unittest.TestCase):
+            def test_error(self):
+                print "So long"
+                raise TypeError("oops")
+            def test_fail(self):
+                print "Hello"
+                self.fail()
+            def test(self):
+                pass
+        plugs = RecordingPluginManager()
+        config = Config(plugins=plugs)
+
+        factory = ResultProxyFactory(config=config)
+
+        case_e = Test(TC('test_error'))
+        case_f = Test(TC('test_fail'))
+        case_t = Test(TC('test'))
+
+        pres_e = factory(res, case_e)
+        case_e(pres_e)
+        assert 'beforeTest' in plugs.called
+        assert 'startTest' in plugs.called
+        assert 'addError' in plugs.called
+        assert 'stopTest' in plugs.called
+        assert 'afterTest' in plugs.called
+        plugs.reset()
+
+        pres_f = factory(res, case_f)
+        case_f(pres_f)
+        assert 'beforeTest' in plugs.called
+        assert 'startTest' in plugs.called
+        assert 'addFailure' in plugs.called
+        assert 'stopTest' in plugs.called
+        assert 'afterTest' in plugs.called
+        plugs.reset()
+
+        pres_t = factory(res, case_t)
+        case_t(pres_t)
+        assert 'beforeTest' in plugs.called
+        assert 'startTest' in plugs.called
+        assert 'addSuccess' in plugs.called
+        assert 'stopTest' in plugs.called
+        assert 'afterTest' in plugs.called
+        plugs.reset()
+
+    def test_stop_on_error(self):
+        from nose.case import Test
+        class TC(unittest.TestCase):
+            def runTest(self):
+                raise Exception("Enough!")
+        conf = Config(stopOnError=True)
+        test = TC()
+        case = Test(test)
+        res = unittest.TestResult()
+        proxy = ResultProxy(res, case, config=conf)
+        case(proxy)
+        assert proxy.shouldStop
+        assert res.shouldStop
+
+    def test_coercion_of_custom_exception(self):
+        from nose.case import Test
+
+        class CustomException(Exception):
+            def __init__(self, message, two, three):
+                Exception.__init__(self, message)
+
+        class TC(unittest.TestCase):
+            def runTest(self):
+                pass
+
+        test = TC()
+        case = Test(test)
+        res = unittest.TestResult()
+        try:
+            raise CustomException("the error", 2, 3)
+        except:
+            etype, val, tb = sys.exc_info()
+        val = str(val) # simulate plugin shenanigans
+        proxy = ResultProxy(res, test=case)
+        # Python 3 coercion should happen here without error
+        proxy.addError(test, (etype, val, tb))
+        proxy.addFailure(test, (etype, val, tb))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_result_proxy.pyc b/unit_tests/test_result_proxy.pyc
new file mode 100644 (file)
index 0000000..466fbcc
Binary files /dev/null and b/unit_tests/test_result_proxy.pyc differ
diff --git a/unit_tests/test_selector$py.class b/unit_tests/test_selector$py.class
new file mode 100644 (file)
index 0000000..461f309
Binary files /dev/null and b/unit_tests/test_selector$py.class differ
diff --git a/unit_tests/test_selector.py b/unit_tests/test_selector.py
new file mode 100644 (file)
index 0000000..73e1593
--- /dev/null
@@ -0,0 +1,200 @@
+import logging
+import os
+import re
+import unittest
+import nose.selector
+from nose.config import Config
+from nose.selector import log, Selector
+from nose.util import absdir
+from mock import mod
+
+class TestSelector(unittest.TestCase):
+
+    def tearDown(self):
+        logging.getLogger('nose.selector').setLevel(logging.WARN)
+    
+    def test_ignore_files_default(self):
+        """A default configuration should always skip some 'hidden' files."""
+        s = Selector(Config())
+        
+        assert not s.wantFile('_test_underscore.py')
+        assert not s.wantFile('.test_hidden.py')
+        assert not s.wantFile('setup.py')
+        
+    def test_ignore_files_override(self):
+        """Override the configuration to skip only specified files."""
+        c = Config()
+        c.ignoreFiles = [re.compile(r'^test_favourite_colour\.py$')]
+        s = Selector(c)
+        
+        assert s.wantFile('_test_underscore.py')
+        assert s.wantFile('.test_hidden.py')
+        assert not s.wantFile('setup.py') # Actually excluded because of testMatch
+        assert not s.wantFile('test_favourite_colour.py')
+    
+    def test_exclude(self):
+        s = Selector(Config())
+        c = Config()
+        c.exclude = [re.compile(r'me')]
+        s2 = Selector(c)
+        
+        assert s.matches('test_foo')
+        assert s2.matches('test_foo')
+        assert s.matches('test_me')
+        assert not s2.matches('test_me')
+        
+    def test_include(self):
+        s = Selector(Config())
+        c = Config()
+        c.include = [re.compile(r'me')]
+        s2 = Selector(c)
+
+        assert s.matches('test')
+        assert s2.matches('test')
+        assert not s.matches('meatball')
+        assert s2.matches('meatball')
+        assert not s.matches('toyota')
+        assert not s2.matches('toyota')
+        
+        c.include.append(re.compile('toy'))
+        assert s.matches('test')
+        assert s2.matches('test')
+        assert not s.matches('meatball')
+        assert s2.matches('meatball')
+        assert not s.matches('toyota')
+        assert s2.matches('toyota')
+        
+    def test_want_class(self):
+        class Foo:
+            pass
+        class Bar(unittest.TestCase):
+            pass
+        class TestMe:
+            pass
+        class TestType(type):
+            def __new__(cls, name, bases, dct):
+                return type.__new__(cls, name, bases, dct)
+        class TestClass(object):
+            __metaclass__ = TestType
+        
+        s = Selector(Config())
+        assert not s.wantClass(Foo)
+        assert s.wantClass(Bar)
+        assert s.wantClass(TestMe)
+        assert s.wantClass(TestClass)
+
+        TestMe.__test__ = False
+        assert not s.wantClass(TestMe), "Failed to respect __test__ = False"
+        Bar.__test__ = False
+        assert not s.wantClass(Bar), "Failed to respect __test__ = False"
+        
+    def test_want_directory(self):
+        s = Selector(Config())
+        assert s.wantDirectory('test')
+        assert not s.wantDirectory('test/whatever')
+        assert s.wantDirectory('whatever/test')
+        assert not s.wantDirectory('/some/path/to/unit_tests/support')
+
+        # default src directory
+        assert s.wantDirectory('lib')
+        assert s.wantDirectory('src')
+
+        # FIXME move to functional tests
+        
+        # this looks on disk for support/foo, which is a package
+        here = os.path.abspath(os.path.dirname(__file__))
+        support = os.path.join(here, 'support')
+        tp = os.path.normpath(os.path.join(support, 'foo'))
+        assert s.wantDirectory(tp)
+        # this looks for support, which is not a package
+        assert not s.wantDirectory(support)        
+        
+    def test_want_file(self):
+
+        #logging.getLogger('nose.selector').setLevel(logging.DEBUG)
+        #logging.basicConfig()
+        
+        c = Config()
+        c.where = [absdir(os.path.join(os.path.dirname(__file__), 'support'))]
+        base = c.where[0]
+        s = Selector(c)
+
+        assert not s.wantFile('setup.py')
+        assert not s.wantFile('/some/path/to/setup.py')
+        assert not s.wantFile('ez_setup.py')
+        assert not s.wantFile('.test.py')
+        assert not s.wantFile('_test.py')
+        assert not s.wantFile('setup_something.py')
+        
+        assert s.wantFile('test.py')
+        assert s.wantFile('foo/test_foo.py')
+        assert s.wantFile('bar/baz/test.py')
+        assert not s.wantFile('foo.py')
+        assert not s.wantFile('test_data.txt')
+        assert not s.wantFile('data.text')
+        assert not s.wantFile('bar/baz/__init__.py')
+        
+    def test_want_function(self):
+        def foo():
+            pass
+        def test_foo():
+            pass
+        def test_bar():
+            pass
+        
+        s = Selector(Config())
+        assert s.wantFunction(test_bar)
+        assert s.wantFunction(test_foo)
+        assert not s.wantFunction(foo)
+
+        test_foo.__test__ = False
+        assert not s.wantFunction(test_foo), \
+               "Failed to respect __test__ = False"
+
+    def test_want_method(self):
+        class Baz:
+            def test_me(self):
+                pass
+            def test_too(self):
+                pass
+            def other(self):
+                pass
+            def test_not_test(self):
+                pass
+            test_not_test.__test__ = False
+            
+        s = Selector(Config())
+        
+        assert s.wantMethod(Baz.test_me)
+        assert s.wantMethod(Baz.test_too)
+        assert not s.wantMethod(Baz.other)
+        assert not s.wantMethod(Baz.test_not_test), \
+               "Failed to respect __test__ = False"
+        
+    def test_want_module(self):
+        m = mod('whatever')
+        m2 = mod('this.that')
+        m3 = mod('this.that.another')
+        m4 = mod('this.that.another.one')
+        m5 = mod('test.something')
+        m6 = mod('a.test')
+        m7 = mod('my_tests')
+        m8 = mod('__main__')
+        
+        s = Selector(Config())
+        assert not s.wantModule(m)
+        assert not s.wantModule(m2)
+        assert not s.wantModule(m3)
+        assert not s.wantModule(m4)
+        assert not s.wantModule(m5)
+        assert s.wantModule(m6)
+        assert s.wantModule(m7)
+        assert s.wantModule(m8)
+
+        m6.__test__ = False
+        assert not s.wantModule(m6), "Failed to respect __test__ = False"
+
+        
+if __name__ == '__main__':
+    # log.setLevel(logging.DEBUG)
+    unittest.main()
diff --git a/unit_tests/test_selector.pyc b/unit_tests/test_selector.pyc
new file mode 100644 (file)
index 0000000..f31718b
Binary files /dev/null and b/unit_tests/test_selector.pyc differ
diff --git a/unit_tests/test_selector_plugins$py.class b/unit_tests/test_selector_plugins$py.class
new file mode 100644 (file)
index 0000000..1b29224
Binary files /dev/null and b/unit_tests/test_selector_plugins$py.class differ
diff --git a/unit_tests/test_selector_plugins.py b/unit_tests/test_selector_plugins.py
new file mode 100644 (file)
index 0000000..7682d79
--- /dev/null
@@ -0,0 +1,30 @@
+import unittest
+import nose.selector
+from nose.config import Config
+from nose.plugins.base import Plugin
+from nose.plugins.manager import PluginManager
+
+class TestSelectorPlugins(unittest.TestCase):
+
+    def test_rejection(self):
+        class EvilSelector(Plugin):
+            def wantFile(self, filename, package=None):
+                if 'good' in filename:
+                    return False
+                return None
+
+        c = Config(plugins=PluginManager(plugins=[EvilSelector()]))
+        s = nose.selector.Selector(c)
+        s2 = nose.selector.Selector(Config())
+        
+        assert s.wantFile('test_neutral.py')
+        assert s2.wantFile('test_neutral.py')
+        
+        assert s.wantFile('test_evil.py')
+        assert s2.wantFile('test_evil.py')
+        
+        assert not s.wantFile('test_good.py')
+        assert s2.wantFile('test_good.py')
+        
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_selector_plugins.pyc b/unit_tests/test_selector_plugins.pyc
new file mode 100644 (file)
index 0000000..d6d70ac
Binary files /dev/null and b/unit_tests/test_selector_plugins.pyc differ
diff --git a/unit_tests/test_skip_plugin$py.class b/unit_tests/test_skip_plugin$py.class
new file mode 100644 (file)
index 0000000..0dd9043
Binary files /dev/null and b/unit_tests/test_skip_plugin$py.class differ
diff --git a/unit_tests/test_skip_plugin.py b/unit_tests/test_skip_plugin.py
new file mode 100644 (file)
index 0000000..c1dccee
--- /dev/null
@@ -0,0 +1,130 @@
+import unittest
+from nose.config import Config
+from nose.plugins.skip import Skip, SkipTest
+from nose.result import TextTestResult
+from StringIO import StringIO
+from nose.result import _TextTestResult
+from optparse import OptionParser
+try:
+    # 2.7+
+    from unittest.runner import _WritelnDecorator
+except ImportError:
+    from unittest import _WritelnDecorator
+
+
+class TestSkipPlugin(unittest.TestCase):
+
+    def test_api_present(self):
+        sk = Skip()
+        sk.addOptions
+        sk.configure
+        sk.prepareTestResult
+
+    def test_prepare_patches_result(self):
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, 1)
+        sk = Skip()
+        sk.prepareTestResult(res)
+        res._orig_addError
+        res._orig_printErrors
+        res._orig_wasSuccessful
+        res.skipped
+        self.assertEqual(res.errorClasses,
+                         {SkipTest: (res.skipped, 'SKIP', False)})
+
+        # result w/out print works too
+        res = unittest.TestResult()
+        sk = Skip()
+        sk.prepareTestResult(res)
+        res._orig_addError
+        res.skipped
+        self.assertEqual(res.errorClasses,
+                         {SkipTest: (res.skipped, 'SKIP', False)})
+
+    def test_patched_result_handles_skip(self):
+        res = unittest.TestResult()
+        sk = Skip()
+        sk.prepareTestResult(res)
+
+        class TC(unittest.TestCase):
+            def test(self):
+                raise SkipTest('skip me')
+
+        test = TC('test')
+        test(res)
+        assert not res.errors, "Skip was not caught: %s" % res.errors
+        assert res.skipped
+        assert res.skipped[0][0] is test
+
+    def test_patches_only_when_needed(self):
+        class NoPatch(unittest.TestResult):
+            def __init__(self):
+                self.errorClasses = {}
+
+        res = NoPatch()
+        sk = Skip()
+        sk.prepareTestResult(res)
+        assert not hasattr(res, '_orig_addError'), \
+               "Skip patched a result class it didn't need to patch"
+
+
+    def test_skip_output(self):
+        class TC(unittest.TestCase):
+            def test(self):
+                raise SkipTest('skip me')
+
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, 1)
+        sk = Skip()
+        sk.prepareTestResult(res)
+
+        test = TC('test')
+        test(res)
+        assert not res.errors, "Skip was not caught: %s" % res.errors
+        assert res.skipped
+
+        res.printErrors()
+        out = stream.getvalue()
+        print out
+        assert out
+        assert out.strip() == "S"
+        assert res.wasSuccessful()
+
+    def test_skip_output_verbose(self):
+
+        class TC(unittest.TestCase):
+            def test(self):
+                raise SkipTest('skip me too')
+
+        stream = _WritelnDecorator(StringIO())
+        res = _TextTestResult(stream, 0, verbosity=2)
+        sk = Skip()
+        sk.prepareTestResult(res)
+        test = TC('test')
+        test(res)
+        assert not res.errors, "Skip was not caught: %s" % res.errors
+        assert res.skipped
+
+        res.printErrors()
+        out = stream.getvalue()
+        print out
+        assert out
+
+        assert ' ... SKIP' in out
+        assert 'skip me too' in out
+
+    def test_enabled_by_default(self):
+        sk = Skip()
+        assert sk.enabled, "Skip was not enabled by default"
+
+    def test_can_be_disabled(self):
+        parser = OptionParser()
+        sk = Skip()
+        sk.addOptions(parser)
+        options, args = parser.parse_args(['--no-skip'])
+        sk.configure(options, Config())
+        assert not sk.enabled, "Skip was not disabled by noSkip option"
+        
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/unit_tests/test_skip_plugin.pyc b/unit_tests/test_skip_plugin.pyc
new file mode 100644 (file)
index 0000000..3b9e954
Binary files /dev/null and b/unit_tests/test_skip_plugin.pyc differ
diff --git a/unit_tests/test_suite$py.class b/unit_tests/test_suite$py.class
new file mode 100644 (file)
index 0000000..4662b5b
Binary files /dev/null and b/unit_tests/test_suite$py.class differ
diff --git a/unit_tests/test_suite.py b/unit_tests/test_suite.py
new file mode 100644 (file)
index 0000000..b6eae20
--- /dev/null
@@ -0,0 +1,301 @@
+from nose.config import Config
+from nose import case
+from nose.suite import LazySuite, ContextSuite, ContextSuiteFactory, \
+     ContextList
+import imp
+import sys
+import unittest
+from mock import ResultProxyFactory, ResultProxy
+
+
+class TestLazySuite(unittest.TestCase):
+
+    def setUp(self):
+        class TC(unittest.TestCase):
+            def test_one(self):
+                pass
+            def test_two(self):
+                pass
+        self.TC = TC
+        
+    def test_test_generator(self):
+        TC = self.TC
+        tests = [TC('test_one'), TC('test_two')]
+        def gen_tests():
+            for test in tests:
+                yield test
+        suite = LazySuite(gen_tests)
+        self.assertEqual(list([test for test in suite]), tests)
+
+    def test_lazy_and_nonlazy(self):
+        TC = self.TC
+        tests = [TC('test_one'), TC('test_two')]
+        def gen_tests():
+            for test in tests:
+                yield test
+
+        nonlazy = LazySuite(tests)
+        lazy = LazySuite(gen_tests)
+
+        assert lazy
+        assert nonlazy
+
+        lazytests = []
+        nonlazytests = []
+        for t in lazy:
+            print "lazy %s" % t
+            lazytests.append(t)
+        for t in nonlazy:
+            print "nonlazy %s" % t
+            nonlazytests.append(t)
+        slazy = map(str, lazytests)
+        snonlazy = map(str, nonlazytests)
+        assert slazy == snonlazy, \
+               "Lazy and Nonlazy produced different test lists (%s vs %s)" \
+               % (slazy, snonlazy)
+
+    def test_lazy_nonzero(self):
+        """__nonzero__ works correctly for lazy suites"""
+        
+        TC = self.TC
+        tests = [TC('test_one'), TC('test_two')]
+        def gen_tests():
+            for test in tests:
+                yield test
+
+        lazy = LazySuite(gen_tests)
+        assert lazy
+        assert lazy
+        assert lazy
+
+        count = 0
+        for test in lazy:
+            print test
+            assert test
+            count += 1
+        self.assertEqual(count, 2, "Expected 2 tests, got %s" % count)
+        assert lazy
+
+        def gen_tests_empty():
+            for test in []:
+                yield test
+            return
+        empty = LazySuite(gen_tests_empty)
+        assert not empty
+        for test in empty:
+            assert False, "Loaded a test from empty suite: %s" % test
+
+class TestContextSuite(unittest.TestCase):
+
+    def setUp(self):
+        class TC(unittest.TestCase):
+            def test_one(self):
+                pass
+            def test_two(self):
+                pass
+        self.TC = TC
+
+    def test_tests_are_wrapped(self):
+        """Tests in a context suite are wrapped"""
+        suite = ContextSuite(
+            [self.TC('test_one'), self.TC('test_two')])
+        for test in suite:
+            assert isinstance(test.test, self.TC)
+
+    def test_nested_context_suites(self):
+        """Nested suites don't re-wrap"""
+        suite = ContextSuite(
+            [self.TC('test_one'), self.TC('test_two')])
+        suite2 = ContextSuite(suite)
+        suite3 = ContextSuite([suite2])
+
+        # suite3 is [suite2]
+        tests = [t for t in suite3]
+        assert isinstance(tests[0], ContextSuite)
+        # suite2 is [suite]
+        tests = [t for t in tests[0]]
+        assert isinstance(tests[0], ContextSuite)
+        # suite is full of wrapped tests
+        tests = [t for t in tests[0]]
+        cases = filter(lambda t: isinstance(t, case.Test), tests)
+        assert cases
+        assert len(cases) == len(tests)
+
+        # sub-suites knows they have a context
+        #assert suite.context is None
+        #assert suite2.context is suite
+        #assert suite3.context is suite2
+
+    def test_context_fixtures_called(self):
+        class P:
+            was_setup = False
+            was_torndown = False
+            def setup(self):
+                self.was_setup = True
+
+            def teardown(self):
+                self.was_torndown = True
+
+        context = P()
+        suite = ContextSuite(
+            [self.TC('test_one'), self.TC('test_two')],
+            context=context)
+        res = unittest.TestResult()
+        suite(res)
+
+        assert not res.errors, res.errors
+        assert not res.failures, res.failures
+        assert context.was_setup
+        assert context.was_torndown
+
+    def test_context_fixtures_for_ancestors(self):
+        top = imp.new_module('top')
+        top.bot = imp.new_module('top.bot')
+        top.bot.end = imp.new_module('top.bot.end')
+
+        sys.modules['top'] = top
+        sys.modules['top.bot'] = top.bot
+        sys.modules['top.bot.end'] = top.bot.end
+
+        class TC(unittest.TestCase):
+            def runTest(self):
+                pass
+        top.bot.TC = TC
+        TC.__module__ = 'top.bot'
+
+        # suite with just TC test
+        # this suite should call top and top.bot setup
+        csf = ContextSuiteFactory()
+        suite = csf(ContextList([TC()], context=top.bot))
+
+        suite.setUp()
+        assert top in csf.was_setup, "Ancestor not set up"
+        assert top.bot in csf.was_setup, "Context not set up"
+        suite.has_run = True
+        suite.tearDown()
+        assert top in csf.was_torndown, "Ancestor not torn down"
+        assert top.bot in csf.was_torndown, "Context not torn down"
+
+        # wrapped suites
+        # the outer suite sets up its context, the inner
+        # its context only, without re-setting up the outer context
+        csf = ContextSuiteFactory()
+        inner_suite = csf(ContextList([TC()], context=top.bot)) 
+        suite = csf(ContextList(inner_suite, context=top))
+
+        suite.setUp()
+        assert top in csf.was_setup
+        assert not top.bot in csf.was_setup
+        inner_suite.setUp()
+        assert top in csf.was_setup
+        assert top.bot in csf.was_setup
+        assert csf.was_setup[top] is suite
+        assert csf.was_setup[top.bot] is inner_suite
+
+    def test_context_fixtures_setup_fails(self):
+        class P:
+            was_setup = False
+            was_torndown = False
+            def setup(self):
+                self.was_setup = True
+                assert False, "Setup failed"
+
+            def teardown(self):
+                self.was_torndown = True
+
+        context = P()
+        suite = ContextSuite(
+            [self.TC('test_one'), self.TC('test_two')],
+            context=context)
+        res = unittest.TestResult()
+        suite(res)
+
+        assert not res.failures, res.failures
+        assert res.errors, res.errors
+        assert context.was_setup
+        assert not context.was_torndown
+        assert res.testsRun == 0, \
+               "Expected to run no tests but ran %s" % res.testsRun
+
+    def test_context_fixtures_no_tests_no_setup(self):
+        class P:
+            was_setup = False
+            was_torndown = False
+            def setup(self):
+                self.was_setup = True
+
+            def teardown(self):
+                self.was_torndown = True
+
+        context = P()
+        suite = ContextSuite([], context=context)
+        res = unittest.TestResult()
+        suite(res)
+
+        assert not res.failures, res.failures
+        assert not res.errors, res.errors
+        assert not context.was_setup
+        assert not context.was_torndown
+        assert res.testsRun == 0, \
+               "Expected to run no tests but ran %s" % res.testsRun
+
+    def test_result_proxy_used(self):
+        class TC(unittest.TestCase):
+            def runTest(self):
+                raise Exception("error")
+            
+        ResultProxy.called[:] = []
+        res = unittest.TestResult()
+        config = Config()
+
+        suite = ContextSuite([TC()], resultProxy=ResultProxyFactory())
+        suite(res)
+        calls = [ c[0] for c in ResultProxy.called ]
+        self.assertEqual(calls, ['beforeTest', 'startTest',
+                                 'addError', 'stopTest', 'afterTest'])
+
+
+class TestContextSuiteFactory(unittest.TestCase):
+            
+    def test_ancestry(self):
+        top = imp.new_module('top')
+        top.bot = imp.new_module('top.bot')
+        top.bot.end = imp.new_module('top.bot.end')
+        
+        sys.modules['top'] = top
+        sys.modules['top.bot'] = top.bot
+        sys.modules['top.bot.end'] = top.bot.end
+        
+        class P:
+            pass
+        top.bot.P = P
+        P.__module__ = 'top.bot'
+
+        csf = ContextSuiteFactory()
+        P_ancestors = list([a for a in csf.ancestry(P)])
+        self.assertEqual(P_ancestors, [top.bot, top])
+
+        end_ancestors = list([a for a in csf.ancestry(top.bot.end)])
+        self.assertEqual(end_ancestors, [top.bot, top])
+
+        bot_ancestors = list([a for a in csf.ancestry(top.bot)])
+        self.assertEqual(bot_ancestors, [top])
+
+        top_ancestors = list([a for a in csf.ancestry(top)])
+        self.assertEqual(top_ancestors, [])
+
+
+if __name__ == '__main__':
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
+        
+#     class TC(unittest.TestCase):
+#             def runTest(self):
+#                 raise Exception("error")
+            
+#     ResultProxy.called[:] = []
+#     res = unittest.TestResult()
+#     config = Config()
+
+    
diff --git a/unit_tests/test_suite.pyc b/unit_tests/test_suite.pyc
new file mode 100644 (file)
index 0000000..31fa481
Binary files /dev/null and b/unit_tests/test_suite.pyc differ
diff --git a/unit_tests/test_tools$py.class b/unit_tests/test_tools$py.class
new file mode 100644 (file)
index 0000000..c0f1baf
Binary files /dev/null and b/unit_tests/test_tools$py.class differ
diff --git a/unit_tests/test_tools.py b/unit_tests/test_tools.py
new file mode 100644 (file)
index 0000000..839d12b
--- /dev/null
@@ -0,0 +1,207 @@
+import sys
+import time
+import unittest
+from nose.tools import *
+
+compat_24 =  sys.version_info >= (2, 4)
+
class TestTools(unittest.TestCase):
    """Exercise the helpers exported by nose.tools: ok_, eq_, raises,
    timed, with_setup, make_decorator and the generated assert_* aliases.
    """

    def test_ok(self):
        """ok_ passes on a true value and raises AssertionError carrying
        the supplied message on a false one."""
        ok_(True)
        try:
            ok_(False, "message")
        except AssertionError, e:
            assert str(e) == "message"
        else:
            self.fail("ok_(False) did not raise assertion error")

    def test_eq(self):
        """eq_ compares for equality; on mismatch it raises AssertionError
        with the supplied message, or 'a != b' when no message is given."""
        eq_(1, 1)
        try:
            eq_(1, 0, "message")
        except AssertionError, e:
            assert str(e) == "message"
        else:
            self.fail("eq_(1, 0) did not raise assertion error")
        try:
            eq_(1, 0)
        except AssertionError, e:
            assert str(e) == "1 != 0"
        else:
            self.fail("eq_(1, 0) did not raise assertion error")

    def test_raises(self):
        """raises(E) passes when the wrapped function raises E, re-raises
        other exceptions unchanged, and fails when nothing is raised."""
        from nose.case import FunctionTestCase

        def raise_typeerror():
            raise TypeError("foo")

        def noraise():
            pass

        raise_good = raises(TypeError)(raise_typeerror)
        raise_other = raises(ValueError)(raise_typeerror)
        no_raise = raises(TypeError)(noraise)

        # The decorator must preserve the wrapped function's name so the
        # test case description stays meaningful.
        tc = FunctionTestCase(raise_good)
        self.assertEqual(str(tc), "%s.%s" % (__name__, 'raise_typeerror'))

        raise_good()
        try:
            raise_other()
        except TypeError, e:
            pass
        else:
            self.fail("raises did pass through unwanted exception")

        try:
            no_raise()
        except AssertionError, e:
            pass
        else:
            self.fail("raises did not raise assertion error on no exception")

    def test_timed(self):
        """timed(limit) raises TimeExpired when the wrapped function takes
        longer than limit seconds, and passes otherwise."""

        def too_slow():
            time.sleep(.3)
        too_slow = timed(.2)(too_slow)

        def quick():
            time.sleep(.1)
        quick = timed(.2)(quick)

        quick()
        try:
            too_slow()
        except TimeExpired:
            pass
        else:
            self.fail("Slow test did not throw TimeExpired")

    def test_make_decorator(self):
        """make_decorator copies test attributes (setup/teardown) from the
        decorated function onto the wrapper."""
        def func():
            pass
        func.setup = 'setup'
        func.teardown = 'teardown'

        def f1():
            pass

        f2 = make_decorator(func)(f1)

        assert f2.setup == 'setup'
        assert f2.teardown == 'teardown'

    def test_nested_decorators(self):
        """Stacking with_setup, timed and raises must still expose the
        setup/teardown attributes attached by the innermost decorator."""
        from nose.tools import raises, timed, with_setup

        def test():
            pass

        def foo():
            pass

        test = with_setup(foo, foo)(test)
        test = timed(1.0)(test)
        test = raises(TypeError)(test)
        assert test.setup == foo
        assert test.teardown == foo

    def test_decorator_func_sorting(self):
        """Decorators must preserve the wrapped function's source line
        number (func_lineno), which nose uses to sort tests in
        definition order."""
        from nose.tools import raises, timed, with_setup
        from nose.util import func_lineno

        def test1():
            pass

        def test2():
            pass

        def test3():
            pass

        def foo():
            pass

        # Record each function's line number before decoration...
        test1_pos = func_lineno(test1)
        test2_pos = func_lineno(test2)
        test3_pos = func_lineno(test3)

        test1 = raises(TypeError)(test1)
        test2 = timed(1.0)(test2)
        test3 = with_setup(foo)(test3)

        # ...and verify decoration did not change it.
        self.assertEqual(func_lineno(test1), test1_pos)
        self.assertEqual(func_lineno(test2), test2_pos)
        self.assertEqual(func_lineno(test3), test3_pos)

    def test_testcase_funcs(self):
        """nose.tools must export assert_* aliases generated from
        unittest.TestCase's assert methods."""
        import nose.tools
        tc_asserts = [ at for at in dir(nose.tools)
                       if at.startswith('assert_') ]
        print tc_asserts

        # FIXME: not sure which of these are in all supported
        # versions of python
        assert 'assert_raises' in tc_asserts
        if compat_24:
            assert 'assert_true' in tc_asserts

    def test_multiple_with_setup(self):
        """Nested with_setup applications run setups outermost-first and
        teardowns innermost-first, like nested contexts."""
        from nose.tools import with_setup
        from nose.case import FunctionTestCase
        from unittest import TestResult

        # Shared call log; each helper appends its own tag.
        called = []
        def test():
            called.append('test')

        def test2():
            called.append('test2')

        def test3():
            called.append('test3')

        def s1():
            called.append('s1')

        def s2():
            called.append('s2')

        def s3():
            called.append('s3')

        def t1():
            called.append('t1')

        def t2():
            called.append('t2')

        def t3():
            called.append('t3')

        ws1 = with_setup(s1, t1)(test)
        case1 = FunctionTestCase(ws1)
        case1(TestResult())
        self.assertEqual(called, ['s1', 'test', 't1'])

        called[:] = []
        ws2 = with_setup(s2, t2)(test2)
        ws2 = with_setup(s1, t1)(ws2)
        case2 = FunctionTestCase(ws2)
        case2(TestResult())
        self.assertEqual(called, ['s1', 's2', 'test2', 't2', 't1'])

        called[:] = []
        ws3 = with_setup(s3, t3)(test3)
        ws3 = with_setup(s2, t2)(ws3)
        ws3 = with_setup(s1, t1)(ws3)
        case3 = FunctionTestCase(ws3)
        case3(TestResult())
        self.assertEqual(called, ['s1', 's2', 's3',
                                  'test3', 't3', 't2', 't1'])
+        
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
diff --git a/unit_tests/test_tools.pyc b/unit_tests/test_tools.pyc
new file mode 100644 (file)
index 0000000..f7be117
Binary files /dev/null and b/unit_tests/test_tools.pyc differ
diff --git a/unit_tests/test_twisted$py.class b/unit_tests/test_twisted$py.class
new file mode 100644 (file)
index 0000000..bc1768d
Binary files /dev/null and b/unit_tests/test_twisted$py.class differ
diff --git a/unit_tests/test_twisted.py b/unit_tests/test_twisted.py
new file mode 100644 (file)
index 0000000..be5041d
--- /dev/null
@@ -0,0 +1,93 @@
from nose.exc import SkipTest
from nose.tools import *
from nose.twistedtools import *
try:
    from twisted.internet.defer import Deferred
    from twisted.internet.error import DNSLookupError
except ImportError:
    # twisted is an optional dependency; skip the whole module without it.
    raise SkipTest('twisted not available; skipping')

# NOTE(review): presumably opts this module out of nose's multiprocess
# plugin (the shared reactor below is module-global state) -- confirm.
_multiprocess_ = False


def teardown():
    # Module-level teardown: shut the shared reactor down after all tests.
    # print "stopping reactor"
    stop_reactor()

class CustomError(Exception):
    # Distinct exception type so the tests below can assert that errors
    # raised inside deferreds / test bodies propagate unchanged.
    pass

# FIXME move all dns-using tests to functional

# Should succeed unless google is down
#@deferred
def test_resolve():
    return reactor.resolve("www.google.com")
test_resolve = deferred()(test_resolve)

# Raises TypeError because the function does not return a Deferred
#@raises(TypeError)
#@deferred()
def test_raises_bad_return():
    print reactor
    reactor.resolve("www.python.org")
test_raises_bad_return = raises(TypeError)(deferred()(test_raises_bad_return))

# Check we propagate twisted Failures as Exceptions
# (XXX this test might take some time: find something better?)
#@raises(DNSLookupError)
#@deferred()
def test_raises_twisted_error():
    return reactor.resolve("x.y.z")
test_raises_twisted_error = raises(DNSLookupError)(
    deferred()(test_raises_twisted_error))

# Check we detect Exceptions inside the callback chain
#@raises(CustomError)
#@deferred(timeout=1.0)
def test_raises_callback_error():
    d = Deferred()
    def raise_error(_):
        raise CustomError()
    def finish():
        d.callback(None)
    d.addCallback(raise_error)
    reactor.callLater(0.01, finish)
    return d
test_raises_callback_error = raises(CustomError)(
    deferred(timeout=1.0)(test_raises_callback_error))

# Check we detect Exceptions inside the test body
#@raises(CustomError)
#@deferred(timeout=1.0)
def test_raises_plain_error():
    raise CustomError
test_raises_plain_error = raises(CustomError)(
    deferred(timeout=1.0)(test_raises_plain_error))

# The deferred is triggered before the timeout: ok
#@deferred(timeout=1.0)
def test_timeout_ok():
    d = Deferred()
    def finish():
        d.callback(None)
    reactor.callLater(0.01, finish)
    return d
test_timeout_ok = deferred(timeout=1.0)(test_timeout_ok)

# The deferred is triggered after the timeout: failure
#@raises(TimeExpired)
#@deferred(timeout=0.1)
def test_timeout_expired():
    d = Deferred()
    def finish():
        d.callback(None)
    reactor.callLater(1.0, finish)
    return d
test_timeout_expired = raises(TimeExpired)(
    deferred(timeout=0.1)(test_timeout_expired))


if __name__ == '__main__':
    from nose import runmodule
    runmodule()
diff --git a/unit_tests/test_twisted.pyc b/unit_tests/test_twisted.pyc
new file mode 100644 (file)
index 0000000..a58da78
Binary files /dev/null and b/unit_tests/test_twisted.pyc differ
diff --git a/unit_tests/test_twisted_testcase$py.class b/unit_tests/test_twisted_testcase$py.class
new file mode 100644 (file)
index 0000000..c301da4
Binary files /dev/null and b/unit_tests/test_twisted_testcase$py.class differ
diff --git a/unit_tests/test_twisted_testcase.py b/unit_tests/test_twisted_testcase.py
new file mode 100644 (file)
index 0000000..850d6e7
--- /dev/null
@@ -0,0 +1,11 @@
try:
    from twisted.trial import unittest
except ImportError:
    # twisted is optional; skip this module when it isn't installed.
    # Import SkipTest from its canonical home nose.exc, consistent with
    # test_twisted.py (the original imported it from the nose package root).
    from nose.exc import SkipTest
    raise SkipTest('twisted not available; skipping')
+
class TestTwisted(unittest.TestCase):
    """Minimal twisted.trial TestCase with a single no-op test."""

    def test(self):
        pass
+
diff --git a/unit_tests/test_twisted_testcase.pyc b/unit_tests/test_twisted_testcase.pyc
new file mode 100644 (file)
index 0000000..22bbc0d
Binary files /dev/null and b/unit_tests/test_twisted_testcase.pyc differ
diff --git a/unit_tests/test_utils$py.class b/unit_tests/test_utils$py.class
new file mode 100644 (file)
index 0000000..bd780eb
Binary files /dev/null and b/unit_tests/test_utils$py.class differ
diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py
new file mode 100644 (file)
index 0000000..13ccf6f
--- /dev/null
@@ -0,0 +1,180 @@
+import os
+import unittest
+import nose
+from nose import case
+from nose.pyversion import unbound_method
+# don't import * -- some util functions look testlike
+from nose import util
+
+np = os.path.normpath
+
class TestUtils(unittest.TestCase):
    """Tests for the helper functions in nose.util: file/test-name
    detection, test-name splitting, test addresses, class detection,
    list coercion and try_run."""

    def test_file_like(self):
        """file_like distinguishes filesystem-looking names (paths, .py
        files) from dotted module/package names."""
        file_like = util.file_like
        assert file_like('a/file')
        assert file_like('file.py')
        assert file_like('/some/file.py')
        assert not file_like('a.file')
        assert not file_like('some.package')
        assert file_like('a-file')
        assert not file_like('test')

    def test_split_test_name(self):
        """split_test_name parses 'path:callable' / 'module:callable'
        specs into a (file, module, callable) triple, any part None."""
        split_test_name = util.split_test_name
        assert split_test_name('a.package:Some.method') == \
            (None, 'a.package', 'Some.method')
        assert split_test_name('some.module') == \
            (None, 'some.module', None)
        assert split_test_name('this/file.py:func') == \
            (np('this/file.py'), None, 'func')
        assert split_test_name('some/file.py') == \
            (np('some/file.py'), None, None)
        assert split_test_name(':Baz') == \
            (None, None, 'Baz')
        # a colon inside a path (no trailing callable) stays in the path
        assert split_test_name('foo:bar/baz.py') == \
            (np('foo:bar/baz.py'), None, None)

    def test_split_test_name_windows(self):
        """Windows drive letters (c:\\...) must not be mistaken for the
        path:callable separator."""
        # convenience
        stn = util.split_test_name
        self.assertEqual(stn(r'c:\some\path.py:a_test'),
                         (np(r'c:\some\path.py'), None, 'a_test'))
        self.assertEqual(stn(r'c:\some\path.py'),
                         (np(r'c:\some\path.py'), None, None))
        self.assertEqual(stn(r'c:/some/other/path.py'),
                         (np(r'c:/some/other/path.py'), None, None))
        self.assertEqual(stn(r'c:/some/other/path.py:Class.test'),
                         (np(r'c:/some/other/path.py'), None, 'Class.test'))
        try:
            stn('cat:dog:something')
        except ValueError:
            pass
        else:
            self.fail("Nonsense test name should throw ValueError")

    def test_test_address(self):
        """test_address maps a test object (function, class, instance,
        bound method, module, or wrapped test case) to its
        (file, module, callable) address."""
        # test addresses are specified as
        #     package.module:class.method
        #     /path/to/file.py:class.method
        # converted into 3-tuples (file, module, callable)
        # all terms optional
        test_address = util.test_address
        absfile = util.absfile
        class Foo:
            def bar(self):
                pass
        def baz():
            pass

        f = Foo()

        class FooTC(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass

        class CustomTestType(type):
            pass
        class CustomTC(unittest.TestCase):
            __metaclass__ = CustomTestType
            def test_one(self):
                pass
            def test_two(self):
                pass

        foo_funct = case.FunctionTestCase(baz)
        foo_functu = unittest.FunctionTestCase(baz)

        foo_mtc = case.MethodTestCase(unbound_method(Foo, Foo.bar))

        # util.src maps a .pyc/.pyo path back to its .py source
        me = util.src(absfile(__file__))
        self.assertEqual(test_address(baz),
                         (me, __name__, 'baz'))
        assert test_address(Foo) == (me, __name__, 'Foo')
        assert test_address(unbound_method(Foo, Foo.bar)) == (me, __name__,
                                                              'Foo.bar')
        assert test_address(f) == (me, __name__, 'Foo')
        assert test_address(f.bar) == (me, __name__, 'Foo.bar')
        assert test_address(nose) == (
            util.src(absfile(nose.__file__)), 'nose', None)

        # test passing the actual test callable, as the
        # missed test plugin must do
        self.assertEqual(test_address(FooTC('test_one')),
                         (me, __name__, 'FooTC.test_one'))
        self.assertEqual(test_address(CustomTC('test_one')),
                         (me, __name__, 'CustomTC.test_one'))
        self.assertEqual(test_address(foo_funct),
                         (me, __name__, 'baz'))
        self.assertEqual(test_address(foo_functu),
                         (me, __name__, 'baz'))
        self.assertEqual(test_address(foo_mtc),
                         (me, __name__, 'Foo.bar'))

    def test_isclass_detects_classes(self):
        """isclass must detect old-style, new-style and custom-metaclass
        classes alike."""
        class TC(unittest.TestCase):
            pass
        class TC_Classic:
            pass
        class TC_object(object):
            pass
        # issue153 -- was not detecting custom typed classes...
        class TCType(type):
            pass
        class TC_custom_type(object):
            __metaclass__ = TCType
        class TC_unittest_custom_type(unittest.TestCase):
            __metaclass__ = TCType

        assert util.isclass(TC), "failed to detect %s as class" % TC
        assert util.isclass(TC_Classic), "failed to detect %s as class" % TC_Classic
        assert util.isclass(TC_object), "failed to detect %s as class" % TC_object
        assert util.isclass(TC_custom_type), "failed to detect %s as class" % TC_custom_type
        assert util.isclass(TC_unittest_custom_type), "failed to detect %s as class" % TC_unittest_custom_type

    def test_isclass_ignores_nonclass_things(self):
        """isclass returns False for plain instances."""
        anint = 1
        adict = {}
        assert not util.isclass(anint), "should have ignored %s" % type(anint)
        assert not util.isclass(adict), "should have ignored %s" % type(adict)

    def test_tolist(self):
        """tolist wraps a bare string in a list, passes lists through,
        and splits comma-separated strings."""
        tolist = util.tolist
        assert tolist('foo') == ['foo']
        assert tolist(['foo', 'bar']) == ['foo', 'bar']
        assert tolist('foo,bar') == ['foo', 'bar']
        # commas inside regex-like entries still act as separators
        self.assertEqual(tolist('.*foo/.*,.1'), ['.*foo/.*', '.1'])

    def test_try_run(self):
        """try_run finds and calls a named function or callable instance
        on a module, whether or not it takes the module argument."""
        try_run = util.try_run
        import imp

        def bar():
            pass

        def bar_m(mod):
            pass

        class Bar:
            def __call__(self):
                pass

        class Bar_m:
            def __call__(self, mod):
                pass

        foo = imp.new_module('foo')
        foo.bar = bar
        foo.bar_m = bar_m
        foo.i_bar = Bar()
        foo.i_bar_m = Bar_m()

        try_run(foo, ('bar',))
        try_run(foo, ('bar_m',))
        try_run(foo, ('i_bar',))
        try_run(foo, ('i_bar_m',))
+        
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
diff --git a/unit_tests/test_utils.pyc b/unit_tests/test_utils.pyc
new file mode 100644 (file)
index 0000000..9dfbc8c
Binary files /dev/null and b/unit_tests/test_utils.pyc differ
diff --git a/unit_tests/test_xunit$py.class b/unit_tests/test_xunit$py.class
new file mode 100644 (file)
index 0000000..70ab3d3
Binary files /dev/null and b/unit_tests/test_xunit$py.class differ
diff --git a/unit_tests/test_xunit.py b/unit_tests/test_xunit.py
new file mode 100644 (file)
index 0000000..3ee7101
--- /dev/null
@@ -0,0 +1,343 @@
+
+import sys
+import os
+import optparse
+import re
+import unittest
+from xml.sax import saxutils
+
+from nose.pyversion import UNICODE_STRINGS
+from nose.tools import eq_
+from nose.plugins.xunit import Xunit, escape_cdata, id_split
+from nose.exc import SkipTest
+from nose.config import Config
+
def mktest():
    """Build a minimal always-passing TestCase instance to feed into the
    Xunit plugin under test."""
    class TC(unittest.TestCase):
        def runTest(self):
            pass
    return TC()

# Keep nose itself from collecting the factory as a test.
mktest.__test__ = False

# Matches the decimal "time" attribute written to the XML report, e.g. "0.01".
time_taken = re.compile(r'\d\.\d\d')
+
class TestEscaping(unittest.TestCase):
    """Tests of the Xunit plugin's attribute quoting and CDATA escaping:
    XML metacharacters, non-ascii text, and control characters."""

    def setUp(self):
        self.x = Xunit()

    def test_all(self):
        """_quoteattr escapes <, >, & and quotes in one pass."""
        eq_(self.x._quoteattr(
            '''<baz src="http://foo?f=1&b=2" quote="inix hubris 'maximus'?" />'''),
            ('"&lt;baz src=&quot;http://foo?f=1&amp;b=2&quot; '
                'quote=&quot;inix hubris \'maximus\'?&quot; /&gt;"'))

    def test_unicode_is_utf8_by_default(self):
        # On byte-string pythons, unicode input comes back utf-8 encoded.
        if not UNICODE_STRINGS:
            eq_(self.x._quoteattr(u'Ivan Krsti\u0107'),
                '"Ivan Krsti\xc4\x87"')

    def test_unicode_custom_utf16_madness(self):
        """An explicit encoding on the plugin is honored for byte-string
        pythons and is a no-op when strings are natively unicode."""
        self.x.encoding = 'utf-16'
        utf16 = self.x._quoteattr(u'Ivan Krsti\u0107')[1:-1]

        if UNICODE_STRINGS:
           # If all internal strings are unicode, then _quoteattr shouldn't
           # have changed anything.
            eq_(utf16, u'Ivan Krsti\u0107')
        else:
            # to avoid big/little endian bytes, assert that we can put it back:
            eq_(utf16.decode('utf16'), u'Ivan Krsti\u0107')

    def test_control_characters(self):
        """Control characters that XML cannot carry become '?'."""
        # quoting of \n, \r varies in diff. python versions
        n = saxutils.quoteattr('\n')[1:-1]
        r = saxutils.quoteattr('\r')[1:-1]
        eq_(self.x._quoteattr('foo\n\b\f\r'), '"foo%s??%s"' % (n, r))
        eq_(escape_cdata('foo\n\b\f\r'), 'foo\n??\r')
+
class TestSplitId(unittest.TestCase):
    """id_split must separate a test id into its class part and its name
    part at the right dot, even when the name's argument parentheses
    contain dots of their own."""

    def check_id_split(self, cls, name):
        # Round-trip: join the two halves with a dot, split them back,
        # and expect the original pieces.
        head, tail = id_split('%s.%s' % (cls, name))[:2]
        eq_(head, cls)
        eq_(tail, name)

    def test_no_parenthesis(self):
        self.check_id_split("test_parset", "test_args")

    def test_no_dot_in_args(self):
        self.check_id_split("test_parset", "test_args(('x', [1, 2]),)")

    def test_dot_in_args(self):
        self.check_id_split("test_parset", "test_args(('x.y', 1),)")

    def test_grandchild_has_dot_in_args(self):
        self.check_id_split("test_grandparset.test_parset",
                            "test_args(('x.y', 1),)")
+
class TestOptions(unittest.TestCase):
    """The plugin's --xunit-file option: built-in default, environment
    override, and explicit command-line value."""

    def _xunit_file(self, env, argv):
        # Register the Xunit options on a fresh parser, parse argv under
        # the given environment, and return the resulting file setting.
        parser = optparse.OptionParser()
        plugin = Xunit()
        plugin.add_options(parser, env=env)
        options, args = parser.parse_args(argv)
        return options.xunit_file

    def test_defaults(self):
        eq_(self._xunit_file({}, []), "nosetests.xml")

    def test_file_from_environ(self):
        eq_(self._xunit_file({'NOSE_XUNIT_FILE': "kangaroo.xml"}, []),
            "kangaroo.xml")

    def test_file_from_opt(self):
        eq_(self._xunit_file({}, ["--xunit-file=blagojevich.xml"]),
            "blagojevich.xml")
+
+class TestXMLOutputWithXML(unittest.TestCase):
+
+    def setUp(self):
+        self.xmlfile = os.path.abspath(
+            os.path.join(os.path.dirname(__file__), 
+                            'support', 'xunit.xml'))
+        parser = optparse.OptionParser()
+        self.x = Xunit()
+        self.x.add_options(parser, env={})
+        (options, args) = parser.parse_args([
+            "--with-xunit",
+            "--xunit-file=%s" % self.xmlfile
+        ])
+        self.x.configure(options, Config())
+
+        try:
+            import xml.etree.ElementTree
+        except ImportError:
+            self.ET = False
+        else:
+            self.ET = xml.etree.ElementTree
+
+    def tearDown(self):
+        os.unlink(self.xmlfile)
+
+    def get_xml_report(self):
+        class DummyStream:
+            pass
+        self.x.report(DummyStream())
+        f = open(self.xmlfile, 'rb')
+        return f.read()
+        f.close()
+
+    def test_addFailure(self):
+        test = mktest()
+        self.x.startTest(test)
+        try:
+            raise AssertionError("one is not 'equal' to two")
+        except AssertionError:
+            some_err = sys.exc_info()
+
+        self.x.addFailure(test, some_err)
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            eq_(tree.attrib['name'], "nosetests")
+            eq_(tree.attrib['tests'], "1")
+            eq_(tree.attrib['errors'], "0")
+            eq_(tree.attrib['failures'], "1")
+            eq_(tree.attrib['skip'], "0")
+
+            tc = tree.find("testcase")
+            eq_(tc.attrib['classname'], "test_xunit.TC")
+            eq_(tc.attrib['name'], "runTest")
+            assert time_taken.match(tc.attrib['time']), (
+                        'Expected decimal time: %s' % tc.attrib['time'])
+
+            err = tc.find("failure")
+            eq_(err.attrib['type'], "%s.AssertionError" % (AssertionError.__module__,))
+            err_lines = err.text.strip().split("\n")
+            eq_(err_lines[0], 'Traceback (most recent call last):')
+            eq_(err_lines[-1], 'AssertionError: one is not \'equal\' to two')
+            eq_(err_lines[-2], '    raise AssertionError("one is not \'equal\' to two")')
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert '<testsuite name="nosetests" tests="1" errors="0" failures="1" skip="0">' in result
+            assert '<testcase classname="test_xunit.TC" name="runTest"' in result
+            assert '<failure type="exceptions.AssertionError"' in result
+            assert "AssertionError: one is not 'equal' to two" in result
+            assert "AssertionError(\"one is not 'equal' to two\")" in result
+            assert '</failure></testcase></testsuite>' in result
+
+    def test_addFailure_early(self):
+        test = mktest()
+        try:
+            raise AssertionError("one is not equal to two")
+        except AssertionError:
+            some_err = sys.exc_info()
+
+        # add failure without startTest, due to custom TestResult munging?
+        self.x.addFailure(test, some_err)
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            tc = tree.find("testcase")
+            assert time_taken.match(tc.attrib['time']), (
+                        'Expected decimal time: %s' % tc.attrib['time'])
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert ('<testcase classname="test_xunit.TC" '
+                    'name="runTest" time="0') in result
+
+    def test_addError(self):
+        test = mktest()
+        self.x.startTest(test)
+        try:
+            raise RuntimeError("some error happened")
+        except RuntimeError:
+            some_err = sys.exc_info()
+
+        self.x.addError(test, some_err)
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            eq_(tree.attrib['name'], "nosetests")
+            eq_(tree.attrib['tests'], "1")
+            eq_(tree.attrib['errors'], "1")
+            eq_(tree.attrib['failures'], "0")
+            eq_(tree.attrib['skip'], "0")
+
+            tc = tree.find("testcase")
+            eq_(tc.attrib['classname'], "test_xunit.TC")
+            eq_(tc.attrib['name'], "runTest")
+            assert time_taken.match(tc.attrib['time']), (
+                        'Expected decimal time: %s' % tc.attrib['time'])
+
+            err = tc.find("error")
+            eq_(err.attrib['type'], "%s.RuntimeError" % (RuntimeError.__module__,))
+            err_lines = err.text.strip().split("\n")
+            eq_(err_lines[0], 'Traceback (most recent call last):')
+            eq_(err_lines[-1], 'RuntimeError: some error happened')
+            eq_(err_lines[-2], '    raise RuntimeError("some error happened")')
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert '<testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">' in result
+            assert '<testcase classname="test_xunit.TC" name="runTest"' in result
+            assert '<error type="exceptions.RuntimeError"' in result
+            assert 'RuntimeError: some error happened' in result
+            assert '</error></testcase></testsuite>' in result
+
+    def test_non_utf8_error(self):
+        # See http://code.google.com/p/python-nose/issues/detail?id=395
+        test = mktest()
+        self.x.startTest(test)
+        try:
+            raise RuntimeError(chr(128)) # cannot encode as utf8 
+        except RuntimeError:
+            some_err = sys.exc_info()
+        self.x.addError(test, some_err)
+        result = self.get_xml_report()
+        print repr(result)
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            tc = tree.find("testcase")
+            err = tc.find("error")
+            if UNICODE_STRINGS:
+                eq_(err.attrib['message'],
+                    '\x80')
+            else:
+                eq_(err.attrib['message'],
+                    u'\ufffd')
+        else:
+            # this is a dumb test for 2.4-
+            assert 'RuntimeError: \xef\xbf\xbd' in result
+
+    def test_addError_early(self):
+        test = mktest()
+        try:
+            raise RuntimeError("some error happened")
+        except RuntimeError:
+            some_err = sys.exc_info()
+
+        # call addError without startTest
+        # which can happen if setup() raises an error
+        self.x.addError(test, some_err)
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            tc = tree.find("testcase")
+            assert time_taken.match(tc.attrib['time']), (
+                        'Expected decimal time: %s' % tc.attrib['time'])
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert ('<testcase classname="test_xunit.TC" '
+                    'name="runTest" time="0') in result
+
+    def test_addSuccess(self):
+        test = mktest()
+        self.x.startTest(test)
+        self.x.addSuccess(test, (None,None,None))
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            eq_(tree.attrib['name'], "nosetests")
+            eq_(tree.attrib['tests'], "1")
+            eq_(tree.attrib['errors'], "0")
+            eq_(tree.attrib['failures'], "0")
+            eq_(tree.attrib['skip'], "0")
+
+            tc = tree.find("testcase")
+            eq_(tc.attrib['classname'], "test_xunit.TC")
+            eq_(tc.attrib['name'], "runTest")
+            assert time_taken.match(tc.attrib['time']), (
+                        'Expected decimal time: %s' % tc.attrib['time'])
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert '<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">' in result
+            assert '<testcase classname="test_xunit.TC" name="runTest"' in result
+            assert '</testsuite>' in result
+
+    def test_addSuccess_early(self):
+        test = mktest()
+        # call addSuccess without startTest
+        # which can happen (?) -- did happen with JsLint plugin
+        self.x.addSuccess(test, (None,None,None))
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            tc = tree.find("testcase")
+            assert time_taken.match(tc.attrib['time']), (
+                        'Expected decimal time: %s' % tc.attrib['time'])
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert ('<testcase classname="test_xunit.TC" '
+                    'name="runTest" time="0') in result
+
diff --git a/unit_tests/test_xunit.pyc b/unit_tests/test_xunit.pyc
new file mode 100644 (file)
index 0000000..d9375bc
Binary files /dev/null and b/unit_tests/test_xunit.pyc differ