refine docs, fix a marker/keywords bit, and add a test that request.keywords points to node.keywords.
parent 7d747a1cde
commit 7c8755cc89
@@ -1,2 +1,2 @@
#
__version__ = '2.3.0.dev28'
__version__ = '2.3.0.dev29'
@@ -1067,7 +1067,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self._pyfuncitem.keywords
        return self.node.keywords

    @property
    def session(self):

@@ -1246,7 +1246,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
        if scope == "function":
            return self._pyfuncitem
        elif scope == "session":
            return None
            return self.session
        elif scope == "class":
            x = self._pyfuncitem.getparent(pytest.Class)
            if x is not None:
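The keywords change above is the core of this commit: ``request.keywords`` now returns the keywords mapping of the underlying node rather than always reaching through ``self._pyfuncitem``. A minimal sketch of what this enables, modeled on the test added at the bottom of this diff (the fixture names here are only illustrative)::

    import pytest

    @pytest.fixture(scope="session", autouse=True)
    def tag_session(request):
        # request.keywords is now the session node's keywords mapping;
        # entries written here are inherited by every item under the session
        request.keywords["hello"] = 42

    @pytest.fixture(autouse=True)
    def check(request):
        # a function-scoped request sees the session-level entry as well as
        # markers applied directly to the test, e.g. @pytest.mark.world below
        assert "hello" in request.keywords

    @pytest.mark.world
    def test_function(request):
        assert "world" in request.keywords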
@@ -26,7 +26,7 @@ you will see the return value of the function call::

$ py.test test_assert1.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_assert1.py F
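For context, the run above comes from the assertion introduction; the module it exercises is essentially the standard documented example, reproduced here as a sketch (not part of this diff)::

    # content of test_assert1.py (sketch of the documented example)
    def f():
        return 3

    def test_function():
        # fails on purpose so the assertion reporting can be shown
        assert f() == 4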
@@ -110,7 +110,7 @@ if you run this module::

$ py.test test_assert2.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_assert2.py F
@@ -64,7 +64,7 @@ of the failing function and hide the other one::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items

test_module.py .F

@@ -78,7 +78,7 @@ of the failing function and hide the other one::

test_module.py:9: AssertionError
----------------------------- Captured stdout ------------------------------
setting up <function test_func2 at 0x25dd230>
setting up <function test_func2 at 0x2b04578>
==================== 1 failed, 1 passed in 0.01 seconds ====================

Accessing captured output from a test function
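The captured-stdout lines above come from a small module with a printing ``setup_function``; roughly this (a sketch of the example the capture docs run, not part of the diff)::

    # content of test_module.py (sketch)
    def setup_function(function):
        print ("setting up %s" % function)

    def test_func1():
        assert True

    def test_func2():
        assert False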
@@ -44,7 +44,7 @@ then you can just invoke ``py.test`` without command line options::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

mymodule.py .
@@ -34,7 +34,7 @@ test modules::

# content of conftest.py

from remoteinterpreter import RemoteInterpreter
from .remoteinterpreter import RemoteInterpreter

@pytest.fixture
def interp(request):

@@ -63,7 +63,7 @@ That's it, we can now run the test::
$ py.test test_remoteinterpreter.py
Traceback (most recent call last):
File "/home/hpk/p/pytest/.tox/regen/bin/py.test", line 9, in <module>
load_entry_point('pytest==2.3.0.dev27', 'console_scripts', 'py.test')()
load_entry_point('pytest==2.3.0.dev28', 'console_scripts', 'py.test')()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 473, in main
config = _prepareconfig(args, plugins)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 463, in _prepareconfig

@@ -98,9 +98,9 @@ That's it, we can now run the test::
self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/py/_path/local.py", line 532, in pyimport
__import__(modname)
File "/tmp/doc-exec-58/conftest.py", line 2, in <module>
from remoteinterpreter import RemoteInterpreter
ImportError: No module named remoteinterpreter
File "/tmp/doc-exec-111/conftest.py", line 2, in <module>
from .remoteinterpreter import RemoteInterpreter
ValueError: Attempted relative import in non-package

.. _`tut-cmdlineoption`:

@@ -150,7 +150,7 @@ Running it yields::
$ py.test -q test_ssh.py -rs
Traceback (most recent call last):
File "/home/hpk/p/pytest/.tox/regen/bin/py.test", line 9, in <module>
load_entry_point('pytest==2.3.0.dev27', 'console_scripts', 'py.test')()
load_entry_point('pytest==2.3.0.dev28', 'console_scripts', 'py.test')()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 473, in main
config = _prepareconfig(args, plugins)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 463, in _prepareconfig

@@ -185,7 +185,7 @@ Running it yields::
self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/py/_path/local.py", line 532, in pyimport
__import__(modname)
File "/tmp/doc-exec-58/conftest.py", line 2, in <module>
File "/tmp/doc-exec-111/conftest.py", line 2, in <module>
from myapp import MyApp
ImportError: No module named myapp
@@ -26,29 +26,25 @@ You can then restrict a test run to only run tests marked with ``webtest``::

$ py.test -v -m webtest
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27 -- /home/hpk/venv/1/bin/python
cachedir: /tmp/doc-exec-66/.cache
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items

test_server.py:3: test_send_http PASSED

=================== 1 tests deselected by "-m 'webtest'" ===================
================== 1 passed, 1 deselected in 0.01 seconds ==================
================== 1 passed, 1 deselected in 0.00 seconds ==================

Or the inverse, running all tests except the webtest ones::

$ py.test -v -m "not webtest"
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27 -- /home/hpk/venv/1/bin/python
cachedir: /tmp/doc-exec-66/.cache
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items

test_server.py:6: test_something_quick PASSED

================= 1 tests deselected by "-m 'not webtest'" =================
================== 1 passed, 1 deselected in 0.01 seconds ==================
================== 1 passed, 1 deselected in 0.00 seconds ==================

Registering markers
-------------------------------------
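The ``-m`` selection runs above (and the ``-k`` runs further down in this file) operate on a small module along these lines; a sketch inferred from the test ids shown in the output (e.g. ``test_server.py:3: test_send_http``), not part of the diff itself::

    # content of test_server.py (sketch)
    import pytest

    @pytest.mark.webtest
    def test_send_http():
        pass  # perform some webtest test for your app

    def test_something_quick():
        pass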
@@ -69,8 +65,6 @@ You can ask which markers exist for your test suite - the list includes our just
$ py.test --markers
@pytest.mark.webtest: mark a test as a webtest.

@pytest.mark.timeout(timeout, method=None): Set a timeout and timeout method on just one test item. The first argument, *timeout*, is the timeout in seconds while the keyword, *method*, takes the same values as the --timeout_method option.

@pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.

@pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
@@ -149,41 +143,38 @@ the given argument::

$ py.test -k send_http # running with the above defined examples
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_server.py .

=================== 3 tests deselected by '-ksend_http' ====================
================== 1 passed, 3 deselected in 0.02 seconds ==================
================== 1 passed, 3 deselected in 0.01 seconds ==================

And you can also run all tests except the ones that match the keyword::

$ py.test -k-send_http
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_mark_classlevel.py ..
test_server.py .

=================== 1 tests deselected by '-k-send_http' ===================
================== 3 passed, 1 deselected in 0.02 seconds ==================
================== 3 passed, 1 deselected in 0.01 seconds ==================

Or to only select the class::

$ py.test -kTestClass
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_mark_classlevel.py ..

=================== 2 tests deselected by '-kTestClass' ====================
================== 2 passed, 2 deselected in 0.02 seconds ==================
================== 2 passed, 2 deselected in 0.01 seconds ==================

.. _`adding a custom marker from a plugin`:
@@ -230,8 +221,7 @@ the test needs::

$ py.test -E stage2
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_someenv.py s

@@ -242,8 +232,7 @@ and here is one that specifies exactly the environment needed::

$ py.test -E stage1
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_someenv.py .

@@ -255,8 +244,6 @@ The ``--markers`` option always gives you a list of available markers::
$ py.test --markers
@pytest.mark.env(name): mark test to run only on named environment

@pytest.mark.timeout(timeout, method=None): Set a timeout and timeout method on just one test item. The first argument, *timeout*, is the timeout in seconds while the keyword, *method*, takes the same values as the --timeout_method option.

@pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.

@pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
@@ -360,22 +347,20 @@ then you will see two test skipped and two executed tests as expected::

$ py.test -rs # this option reports skip reasons
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_plat.py s.s.
========================= short test summary info ==========================
SKIP [2] /tmp/doc-exec-66/conftest.py:12: cannot run on platform linux2
SKIP [2] /tmp/doc-exec-113/conftest.py:12: cannot run on platform linux2

=================== 2 passed, 2 skipped in 0.02 seconds ====================
=================== 2 passed, 2 skipped in 0.01 seconds ====================

Note that if you specify a platform via the marker-command line option like this::

$ py.test -m linux2
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
plugins: xdist, bugzilla, cache, cli, pep8, oejskit, timeout, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_plat.py .
@@ -27,7 +27,7 @@ now execute the test specification::

nonpython $ py.test test_simple.yml
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items

test_simple.yml .F

@@ -56,7 +56,7 @@ consulted when reporting in ``verbose`` mode::

nonpython $ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items

test_simple.yml:1: usecase: ok PASSED

@@ -74,7 +74,7 @@ interesting to just look at the collection tree::

nonpython $ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items
<YamlFile 'test_simple.yml'>
<YamlItem 'ok'>
@@ -104,7 +104,7 @@ this is a fully self-contained example which you can run with::

$ py.test test_scenarios.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_scenarios.py ....

@@ -116,7 +116,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

$ py.test --collectonly test_scenarios.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items
<Module 'test_scenarios.py'>
<Class 'TestSampleWithScenarios'>

@@ -180,7 +180,7 @@ Let's first see how it looks like at collection time::

$ py.test test_backends.py --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items
<Module 'test_backends.py'>
<Function 'test_db_initialized[d1]'>

@@ -195,7 +195,7 @@ And then when we run the test::
================================= FAILURES =================================
_________________________ test_db_initialized[d2] __________________________

db = <conftest.DB2 instance at 0x1540440>
db = <conftest.DB2 instance at 0x126b878>

def test_db_initialized(db):
# a dummy test

@@ -250,7 +250,7 @@ argument sets to use for each test function. Let's run it::
================================= FAILURES =================================
________________________ TestClass.test_equals[1-2] ________________________

self = <test_parametrize.TestClass instance at 0x253bd88>, a = 1, b = 2
self = <test_parametrize.TestClass instance at 0x2c24b90>, a = 1, b = 2

def test_equals(self, a, b):
> assert a == b
@@ -43,7 +43,7 @@ then the test collection looks like this::

$ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items
<Module 'check_myapp.py'>
<Class 'CheckMyApp'>

@@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this::

. $ py.test --collectonly pythoncollection.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 3 items
<Module 'pythoncollection.py'>
<Function 'test_function'>

@@ -91,7 +91,7 @@ You can always peek at the collection tree without running tests like this::
<Function 'test_method'>
<Function 'test_anothermethod'>

============================= in 0.01 seconds =============================
============================= in 0.00 seconds =============================

customizing test collection to find all .py files
---------------------------------------------------------

@@ -135,7 +135,7 @@ interpreters and will leave out the setup.py file::

$ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items
<Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'>
@@ -13,7 +13,7 @@ get on the terminal - we are working on that):

assertion $ py.test failure_demo.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 39 items

failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF

@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
failure_demo.py:15: AssertionError
_________________________ TestFailing.test_simple __________________________

self = <failure_demo.TestFailing object at 0x13a0990>
self = <failure_demo.TestFailing object at 0x1619d90>

def test_simple(self):
def f():

@@ -40,13 +40,13 @@ get on the terminal - we are working on that):

> assert f() == g()
E assert 42 == 43
E + where 42 = <function f at 0x1326f50>()
E + and 43 = <function g at 0x132f050>()
E + where 42 = <function f at 0x15ba320>()
E + and 43 = <function g at 0x15ba398>()

failure_demo.py:28: AssertionError
____________________ TestFailing.test_simple_multiline _____________________

self = <failure_demo.TestFailing object at 0x13a0e90>
self = <failure_demo.TestFailing object at 0x1619450>

def test_simple_multiline(self):
otherfunc_multi(

@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
failure_demo.py:11: AssertionError
___________________________ TestFailing.test_not ___________________________

self = <failure_demo.TestFailing object at 0x13a0bd0>
self = <failure_demo.TestFailing object at 0x1619090>

def test_not(self):
def f():
return 42
> assert not f()
E assert not 42
E + where 42 = <function f at 0x132f1b8>()
E + where 42 = <function f at 0x15ba668>()

failure_demo.py:38: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x13a0150>
self = <failure_demo.TestSpecialisedExplanations object at 0x1619c10>

def test_eq_text(self):
> assert 'spam' == 'eggs'

@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
failure_demo.py:42: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________

self = <failure_demo.TestSpecialisedExplanations object at 0x139ee10>
self = <failure_demo.TestSpecialisedExplanations object at 0x1613190>

def test_eq_similar_text(self):
> assert 'foo 1 bar' == 'foo 2 bar'

@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
failure_demo.py:45: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

self = <failure_demo.TestSpecialisedExplanations object at 0x139be10>
self = <failure_demo.TestSpecialisedExplanations object at 0x16131d0>

def test_eq_multiline_text(self):
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'

@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
failure_demo.py:48: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________

self = <failure_demo.TestSpecialisedExplanations object at 0x139bfd0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1613c90>

def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100

@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
failure_demo.py:53: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0x139bc10>
self = <failure_demo.TestSpecialisedExplanations object at 0x16130d0>

def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100

@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
failure_demo.py:58: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x139b310>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b98d0>

def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3]

@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
failure_demo.py:61: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________

self = <failure_demo.TestSpecialisedExplanations object at 0x139b8d0>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b9dd0>

def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100

@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
failure_demo.py:66: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x139b590>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b99d0>

def test_eq_dict(self):
> assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}

@@ -191,7 +191,7 @@ get on the terminal - we are working on that):
failure_demo.py:69: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________

self = <failure_demo.TestSpecialisedExplanations object at 0x139b150>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b92d0>

def test_eq_set(self):
> assert set([0, 10, 11, 12]) == set([0, 20, 21])

@@ -207,7 +207,7 @@ get on the terminal - we are working on that):
failure_demo.py:72: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________

self = <failure_demo.TestSpecialisedExplanations object at 0x139ba50>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b9850>

def test_eq_longer_list(self):
> assert [1,2] == [1,2,3]
@@ -217,7 +217,7 @@ get on the terminal - we are working on that):
failure_demo.py:75: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x1342910>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b9410>

def test_in_list(self):
> assert 1 in [0, 2, 3, 4, 5]

@@ -226,7 +226,7 @@ get on the terminal - we are working on that):
failure_demo.py:78: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0x1342110>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b0b90>

def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'

@@ -244,7 +244,7 @@ get on the terminal - we are working on that):
failure_demo.py:82: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________

self = <failure_demo.TestSpecialisedExplanations object at 0x1342d90>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b0e50>

def test_not_in_text_single(self):
text = 'single foo line'

@@ -257,7 +257,7 @@ get on the terminal - we are working on that):
failure_demo.py:86: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________

self = <failure_demo.TestSpecialisedExplanations object at 0x1342410>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b0690>

def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20

@@ -270,7 +270,7 @@ get on the terminal - we are working on that):
failure_demo.py:90: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______

self = <failure_demo.TestSpecialisedExplanations object at 0x1342f10>
self = <failure_demo.TestSpecialisedExplanations object at 0x15b0f50>

def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20

@@ -289,7 +289,7 @@ get on the terminal - we are working on that):
i = Foo()
> assert i.b == 2
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x1342b50>.b
E + where 1 = <failure_demo.Foo object at 0x15b0990>.b

failure_demo.py:101: AssertionError
_________________________ test_attribute_instance __________________________

@@ -299,8 +299,8 @@ get on the terminal - we are working on that):
b = 1
> assert Foo().b == 2
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x1342f90>.b
E + where <failure_demo.Foo object at 0x1342f90> = <class 'failure_demo.Foo'>()
E + where 1 = <failure_demo.Foo object at 0x15b0390>.b
E + where <failure_demo.Foo object at 0x15b0390> = <class 'failure_demo.Foo'>()

failure_demo.py:107: AssertionError
__________________________ test_attribute_failure __________________________

@@ -316,7 +316,7 @@ get on the terminal - we are working on that):
failure_demo.py:116:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <failure_demo.Foo object at 0x1342610>
self = <failure_demo.Foo object at 0x15b03d0>

def _get_b(self):
> raise Exception('Failed to get attrib')

@@ -332,15 +332,15 @@ get on the terminal - we are working on that):
b = 2
> assert Foo().b == Bar().b
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x1342510>.b
E + where <failure_demo.Foo object at 0x1342510> = <class 'failure_demo.Foo'>()
E + and 2 = <failure_demo.Bar object at 0x1339790>.b
E + where <failure_demo.Bar object at 0x1339790> = <class 'failure_demo.Bar'>()
E + where 1 = <failure_demo.Foo object at 0x15b04d0>.b
E + where <failure_demo.Foo object at 0x15b04d0> = <class 'failure_demo.Foo'>()
E + and 2 = <failure_demo.Bar object at 0x15ac050>.b
E + where <failure_demo.Bar object at 0x15ac050> = <class 'failure_demo.Bar'>()

failure_demo.py:124: AssertionError
__________________________ TestRaises.test_raises __________________________

self = <failure_demo.TestRaises instance at 0x13af560>
self = <failure_demo.TestRaises instance at 0x1627c68>

def test_raises(self):
s = 'qwe'

@@ -355,7 +355,7 @@ get on the terminal - we are working on that):
<0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:834>:1: ValueError
______________________ TestRaises.test_raises_doesnt _______________________

self = <failure_demo.TestRaises instance at 0x13c0d88>
self = <failure_demo.TestRaises instance at 0x16283b0>

def test_raises_doesnt(self):
> raises(IOError, "int('3')")

@@ -364,7 +364,7 @@ get on the terminal - we are working on that):
failure_demo.py:136: Failed
__________________________ TestRaises.test_raise ___________________________

self = <failure_demo.TestRaises instance at 0x13a6248>
self = <failure_demo.TestRaises instance at 0x161f4d0>

def test_raise(self):
> raise ValueError("demo error")

@@ -373,7 +373,7 @@ get on the terminal - we are working on that):
failure_demo.py:139: ValueError
________________________ TestRaises.test_tupleerror ________________________

self = <failure_demo.TestRaises instance at 0x13a6f80>
self = <failure_demo.TestRaises instance at 0x161a248>

def test_tupleerror(self):
> a,b = [1]

@@ -382,7 +382,7 @@ get on the terminal - we are working on that):
failure_demo.py:142: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______

self = <failure_demo.TestRaises instance at 0x13a1cb0>
self = <failure_demo.TestRaises instance at 0x161af80>

def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]

@@ -395,7 +395,7 @@ get on the terminal - we are working on that):
l is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________

self = <failure_demo.TestRaises instance at 0x13a5ab8>
self = <failure_demo.TestRaises instance at 0x1617d88>

def test_some_error(self):
> if namenotexi:
@@ -423,7 +423,7 @@ get on the terminal - we are working on that):
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________

self = <failure_demo.TestMoreErrors instance at 0x13b6908>
self = <failure_demo.TestMoreErrors instance at 0x1628758>

def test_complex_error(self):
def f():

@@ -452,7 +452,7 @@ get on the terminal - we are working on that):
failure_demo.py:5: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________

self = <failure_demo.TestMoreErrors instance at 0x139cb00>
self = <failure_demo.TestMoreErrors instance at 0x1612d88>

def test_z1_unpack_error(self):
l = []

@@ -462,7 +462,7 @@ get on the terminal - we are working on that):
failure_demo.py:179: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________

self = <failure_demo.TestMoreErrors instance at 0x13a7908>
self = <failure_demo.TestMoreErrors instance at 0x1614b90>

def test_z2_type_error(self):
l = 3

@@ -472,19 +472,19 @@ get on the terminal - we are working on that):
failure_demo.py:183: TypeError
______________________ TestMoreErrors.test_startswith ______________________

self = <failure_demo.TestMoreErrors instance at 0x139d710>
self = <failure_demo.TestMoreErrors instance at 0x1610998>

def test_startswith(self):
s = "123"
g = "456"
> assert s.startswith(g)
E assert <built-in method startswith of str object at 0x13a3af8>('456')
E + where <built-in method startswith of str object at 0x13a3af8> = '123'.startswith
E assert <built-in method startswith of str object at 0x161db48>('456')
E + where <built-in method startswith of str object at 0x161db48> = '123'.startswith

failure_demo.py:188: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________

self = <failure_demo.TestMoreErrors instance at 0x13a7098>
self = <failure_demo.TestMoreErrors instance at 0x16140e0>

def test_startswith_nested(self):
def f():

@@ -492,15 +492,15 @@ get on the terminal - we are working on that):
def g():
return "456"
> assert f().startswith(g())
E assert <built-in method startswith of str object at 0x13a3af8>('456')
E + where <built-in method startswith of str object at 0x13a3af8> = '123'.startswith
E + where '123' = <function f at 0x13c4230>()
E + and '456' = <function g at 0x13c45f0>()
E assert <built-in method startswith of str object at 0x161db48>('456')
E + where <built-in method startswith of str object at 0x161db48> = '123'.startswith
E + where '123' = <function f at 0x163b848>()
E + and '456' = <function g at 0x162d758>()

failure_demo.py:195: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________

self = <failure_demo.TestMoreErrors instance at 0x13c2710>
self = <failure_demo.TestMoreErrors instance at 0x16207a0>

def test_global_func(self):
> assert isinstance(globf(42), float)

@@ -510,18 +510,18 @@ get on the terminal - we are working on that):
failure_demo.py:198: AssertionError
_______________________ TestMoreErrors.test_instance _______________________

self = <failure_demo.TestMoreErrors instance at 0x13a1f38>
self = <failure_demo.TestMoreErrors instance at 0x161ad88>

def test_instance(self):
self.x = 6*7
> assert self.x != 42
E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors instance at 0x13a1f38>.x
E + where 42 = <failure_demo.TestMoreErrors instance at 0x161ad88>.x

failure_demo.py:202: AssertionError
_______________________ TestMoreErrors.test_compare ________________________

self = <failure_demo.TestMoreErrors instance at 0x1398320>
self = <failure_demo.TestMoreErrors instance at 0x160e560>

def test_compare(self):
> assert globf(10) < 5

@@ -531,7 +531,7 @@ get on the terminal - we are working on that):
failure_demo.py:205: AssertionError
_____________________ TestMoreErrors.test_try_finally ______________________

self = <failure_demo.TestMoreErrors instance at 0x1399128>
self = <failure_demo.TestMoreErrors instance at 0x160f3b0>

def test_try_finally(self):
x = 1
@@ -106,7 +106,7 @@ directory with the above conftest.py::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 0 items

============================= in 0.00 seconds =============================

@@ -150,20 +150,20 @@ and when running it will see a skipped "slow" test::

$ py.test -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items

test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-60/conftest.py:9: need --runslow option to run
SKIP [1] /tmp/doc-exec-118/conftest.py:9: need --runslow option to run

=================== 1 passed, 1 skipped in 0.03 seconds ====================
=================== 1 passed, 1 skipped in 0.01 seconds ====================

Or run it including the ``slow`` marked test::

$ py.test --runslow
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items

test_module.py ..

@@ -253,7 +253,7 @@ which will add the string to the test header accordingly::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
project deps: mylib-1.1
collected 0 items

@@ -276,7 +276,7 @@ which will add info only when run with "--v"::

$ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28 -- /home/hpk/p/pytest/.tox/regen/bin/python
info1: did you know that ...
did you?
collecting ... collected 0 items

@@ -287,7 +287,7 @@ and nothing when run plainly::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 0 items

============================= in 0.00 seconds =============================

@@ -319,7 +319,7 @@ Now we can profile which test functions execute the slowest::

$ py.test --durations=3
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 3 items

test_some_are_slow.py ...
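The ``--durations=3`` run above profiles a module of deliberately slow tests; it would look roughly like this (a sketch, the sleep times are made up)::

    # content of test_some_are_slow.py (sketch)
    import time

    def test_funcfast():
        pass

    def test_funcslow1():
        time.sleep(0.1)

    def test_funcslow2():
        time.sleep(0.2)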
@@ -346,13 +346,13 @@ an ``incremental`` marker which is to be used on classes::
    import pytest

    def pytest_runtest_makereport(item, call):
        if hasattr(item.markers, "incremental"):
        if "incremental" in item.keywords:
            if call.excinfo is not None:
                parent = item.parent
                parent._previousfailed = item

    def pytest_runtest_setup(item):
        if hasattr(item.markers, "incremental"):
        if "incremental" in item.keywords:
            previousfailed = getattr(item.parent, "_previousfailed", None)
            if previousfailed is not None:
                pytest.xfail("previous test failed (%s)" %previousfailed.name)
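The conftest hooks above now check ``item.keywords`` instead of the non-existent ``item.markers`` attribute, which is the doc-side counterpart of the keywords fix in this commit. A test module using the marker would look roughly like this (a sketch matching the ``test_step.py .Fx.`` output shown below)::

    # content of test_step.py (sketch)
    import pytest

    @pytest.mark.incremental
    class TestUserHandling:
        def test_login(self):
            pass
        def test_modification(self):
            assert 0          # fails, so the next step is xfailed
        def test_deletion(self):
            pass

    def test_normal():
        pass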
@@ -380,7 +380,7 @@ If we run this::

$ py.test -rx
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 4 items

test_step.py .Fx.

@@ -388,7 +388,7 @@ If we run this::
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling instance at 0x28e1128>
self = <test_step.TestUserHandling instance at 0x1c89638>

def test_modification(self):
> assert 0
@@ -71,7 +71,7 @@ fixture function. Running the test looks like this::

$ py.test test_smtpsimple.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_smtpsimple.py F

@@ -79,7 +79,7 @@ fixture function. Running the test looks like this::
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP instance at 0x1ceeb00>
smtp = <smtplib.SMTP instance at 0x1929d40>

def test_ehlo(smtp):
response, msg = smtp.ehlo()
@@ -130,7 +130,7 @@ Funcargs a prime example of dependency injection
When injecting fixtures to test functions, pytest-2.0 introduced the
term "funcargs" or "funcarg mechanism" which continues to be present
also in pytest-2.3 docs. It now refers to the specific case of injecting
fixture values to test functions by arguments. With pytest-2.3 there are
fixture values as arguments to test functions. With pytest-2.3 there are
more possibilities to use fixtures but "funcargs" probably will remain
as the main way of dealing with fixtures.
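A one-line illustration of the "funcargs" idea described in the reworded paragraph above: the fixture value is injected purely by naming it as a test function argument (a sketch; the fixture name and value are only examples)::

    import pytest

    @pytest.fixture
    def smtp_host():
        return "mail.python.org"   # example value

    def test_host(smtp_host):      # "funcarg": injected by argument name
        assert smtp_host.endswith(".org")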
@@ -187,7 +187,7 @@ inspect what is going on and can now run the tests::

$ py.test test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items

test_module.py FF

@@ -195,7 +195,7 @@ inspect what is going on and can now run the tests::
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP instance at 0x1ef15f0>
smtp = <smtplib.SMTP instance at 0x135f3b0>

def test_ehlo(smtp):
response = smtp.ehlo()

@@ -207,7 +207,7 @@ inspect what is going on and can now run the tests::
test_module.py:6: AssertionError
________________________________ test_noop _________________________________

smtp = <smtplib.SMTP instance at 0x1ef15f0>
smtp = <smtplib.SMTP instance at 0x135f3b0>

def test_noop(smtp):
response = smtp.noop()

@@ -216,7 +216,7 @@ inspect what is going on and can now run the tests::
E assert 0

test_module.py:11: AssertionError
========================= 2 failed in 0.24 seconds =========================
========================= 2 failed in 0.22 seconds =========================

You see the two ``assert 0`` failing and more importantly you can also see
that the same (session-scoped) ``smtp`` object was passed into the two
@@ -229,9 +229,9 @@ quick as a single one because they reuse the same instance.
Fixtures can interact with the requesting test context
-------------------------------------------------------------

By accepting the special :py:class:`request <FixtureRequest>` argument,
Using the special :py:class:`request <FixtureRequest>` argument,
fixture functions can introspect the function, class or module for which
they are invoked and can optionally register finalizing cleanup
they are invoked or can register finalizing (cleanup)
functions which are called when the last test finished execution.

Further extending the previous ``smtp`` fixture example, let's try to
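The reworded paragraph above describes fixtures taking the ``request`` object to introspect the requesting context and to register finalizers. A loose sketch of such a fixture follows; it is not the docs' exact ``smtp`` fixture (that one is session-scoped and later parametrized), and the server name is only an example, but it matches the "finalizing <smtplib.SMTP ...>" output shown below::

    import smtplib
    import pytest

    @pytest.fixture(scope="module")
    def smtp(request):
        # connect once per module; the server name is just an example
        smtp = smtplib.SMTP("merlinux.eu")
        def fin():
            print ("finalizing %s" % smtp)
            smtp.close()
        request.addfinalizer(fin)   # runs after the last test using the fixture
        return smtp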
@@ -258,7 +258,7 @@ using it has executed::

$ py.test -s -q --tb=no
FF
finalizing <smtplib.SMTP instance at 0x284e488>
finalizing <smtplib.SMTP instance at 0x2279758>

We see that the ``smtp`` instance is finalized after the two
tests using it tests executed. If we had specified ``scope='function'``

@@ -270,7 +270,7 @@ server URL and has a test to verify the fixture picks it up::

# content of test_anothersmtp.py

smtpserver = "merlinux.eu" # will be read by smtp fixture
smtpserver = "mail.python.org" # will be read by smtp fixture

def test_showhelo(smtp):
assert 0, smtp.helo()

@@ -328,7 +328,7 @@ So let's just do another run::
================================= FAILURES =================================
__________________________ test_ehlo[merlinux.eu] __________________________

smtp = <smtplib.SMTP instance at 0x2f69440>
smtp = <smtplib.SMTP instance at 0x1f5f680>

def test_ehlo(smtp):
response = smtp.ehlo()

@@ -340,7 +340,7 @@ So let's just do another run::
test_module.py:6: AssertionError
__________________________ test_noop[merlinux.eu] __________________________

smtp = <smtplib.SMTP instance at 0x2f69440>
smtp = <smtplib.SMTP instance at 0x1f5f680>

def test_noop(smtp):
response = smtp.noop()

@@ -351,7 +351,7 @@ So let's just do another run::
test_module.py:11: AssertionError
________________________ test_ehlo[mail.python.org] ________________________

smtp = <smtplib.SMTP instance at 0x2fecea8>
smtp = <smtplib.SMTP instance at 0x1fdffc8>

def test_ehlo(smtp):
response = smtp.ehlo()

@@ -362,7 +362,7 @@ So let's just do another run::
test_module.py:5: AssertionError
________________________ test_noop[mail.python.org] ________________________

smtp = <smtplib.SMTP instance at 0x2fecea8>
smtp = <smtplib.SMTP instance at 0x1fdffc8>

def test_noop(smtp):
response = smtp.noop()

@@ -410,13 +410,13 @@ Here we declare an ``app`` fixture which receives the previously defined

$ py.test -v test_appsetup.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items

test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED

========================= 2 passed in 0.09 seconds =========================
========================= 2 passed in 0.11 seconds =========================

Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no

@@ -472,7 +472,7 @@ Let's run the tests in verbose mode and with looking at the print-output::

$ py.test -v -s test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 8 items

test_module.py:16: test_0[1] PASSED
@@ -23,7 +23,7 @@ Installation options::
To check your installation has installed the correct version::

$ py.test --version
This is py.test version 2.3.0.dev27, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
This is py.test version 2.3.0.dev28, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc

If you get an error checkout :ref:`installation issues`.
@@ -45,7 +45,7 @@ That's it. You can execute the test function now::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_sample.py F
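The failing ``test_sample.py F`` above is the getting-started example, which is approximately (a sketch, not part of this diff)::

    # content of test_sample.py (sketch of the documented example)
    def func(x):
        return x + 1

    def test_answer():
        # fails on purpose to demonstrate the failure report
        assert func(3) == 5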
@@ -122,7 +122,7 @@ run the module by passing its filename::
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________

self = <test_class.TestClass instance at 0x1ac71b8>
self = <test_class.TestClass instance at 0x261e6c8>

def test_two(self):
x = "hello"

@@ -157,7 +157,7 @@ before performing the test function call. Let's just run it::
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________

tmpdir = local('/tmp/pytest-914/test_needsfiles0')
tmpdir = local('/tmp/pytest-990/test_needsfiles0')

def test_needsfiles(tmpdir):
print tmpdir

@@ -166,7 +166,7 @@ before performing the test function call. Let's just run it::

test_tmpdir.py:3: AssertionError
----------------------------- Captured stdout ------------------------------
/tmp/pytest-914/test_needsfiles0
/tmp/pytest-990/test_needsfiles0

Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.
@@ -53,7 +53,7 @@ which will thus run three times::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 3 items

test_expectation.py ..F
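The three-way run above (``..F``) exercises the documented parametrize example, roughly (a sketch; the argument values are the ones the docs use)::

    # content of test_expectation.py (sketch)
    import pytest

    @pytest.mark.parametrize(("input", "expected"), [
        ("3+5", 8),
        ("2+4", 6),
        ("6*9", 42),   # fails: eval("6*9") == 54
    ])
    def test_eval(input, expected):
        assert eval(input) == expected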
@@ -135,8 +135,8 @@ Let's also run with a stringinput that will lead to a failing test::

def test_valid_string(stringinput):
> assert stringinput.isalpha()
E assert <built-in method isalpha of str object at 0x2b58af1ef030>()
E + where <built-in method isalpha of str object at 0x2b58af1ef030> = '!'.isalpha
E assert <built-in method isalpha of str object at 0x2b1b676af030>()
E + where <built-in method isalpha of str object at 0x2b1b676af030> = '!'.isalpha

test_strings.py:3: AssertionError

@@ -149,7 +149,7 @@ listlist::
$ py.test -q -rs test_strings.py
s
========================= short test summary info ==========================
SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:943: got empty parameter set, function test_valid_string at /tmp/doc-exec-26/test_strings.py:1
SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:943: got empty parameter set, function test_valid_string at /tmp/doc-exec-84/test_strings.py:1

For further examples, you might want to look at :ref:`more
parametrization examples <paramexamples>`.
@@ -132,7 +132,7 @@ Running it with the report-on-xfail option gives this output::

example $ py.test -rx xfail_demo.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 6 items

xfail_demo.py xxxxxx
@@ -29,7 +29,7 @@ Running this would result in a passed test except for the last

$ py.test test_tmpdir.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 1 items

test_tmpdir.py F

@@ -37,7 +37,7 @@ Running this would result in a passed test except for the last
================================= FAILURES =================================
_____________________________ test_create_file _____________________________

tmpdir = local('/tmp/pytest-915/test_create_file0')
tmpdir = local('/tmp/pytest-991/test_create_file0')

def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")

@@ -48,7 +48,7 @@ Running this would result in a passed test except for the last
E assert 0

test_tmpdir.py:7: AssertionError
========================= 1 failed in 0.02 seconds =========================
========================= 1 failed in 0.07 seconds =========================

.. _`base temporary directory`:
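The traceback above comes from the documented ``tmpdir`` example, which deliberately ends in ``assert 0`` so the reporting can be shown; roughly (a sketch, not part of this diff)::

    # content of test_tmpdir.py (sketch)
    def test_create_file(tmpdir):
        # tmpdir is a py.path.local pointing at a per-test temporary directory
        p = tmpdir.mkdir("sub").join("hello.txt")
        p.write("content")
        assert p.read() == "content"
        assert len(tmpdir.listdir()) == 1
        assert 0   # fail on purpose to show the traceback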
@@ -87,7 +87,7 @@ the ``self.db`` values in the traceback::

$ py.test test_unittest_db.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev27
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev28
collected 2 items

test_unittest_db.py FF

@@ -100,7 +100,7 @@ the ``self.db`` values in the traceback::
def test_method1(self):
assert hasattr(self, "db")
> assert 0, self.db # fail for demo purposes
E AssertionError: <conftest.DummyDB instance at 0x18ff9e0>
E AssertionError: <conftest.DummyDB instance at 0x26ccbd8>

test_unittest_db.py:9: AssertionError
___________________________ MyTest.test_method2 ____________________________

@@ -109,7 +109,7 @@ the ``self.db`` values in the traceback::

def test_method2(self):
> assert 0, self.db # fail for demo purposes
E AssertionError: <conftest.DummyDB instance at 0x18ff9e0>
E AssertionError: <conftest.DummyDB instance at 0x26ccbd8>

test_unittest_db.py:12: AssertionError
========================= 2 failed in 0.02 seconds =========================
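The two failures above come from the unittest-integration example, where a class-scoped fixture attaches a ``DummyDB`` instance (the name appears in the assertion messages) to the TestCase class. A rough sketch of that setup, with the fixture wiring inferred rather than taken from this diff::

    # content of conftest.py (sketch)
    import pytest

    class DummyDB:
        pass

    @pytest.fixture(scope="class")
    def db_class(request):
        # attach the db object to the requesting test class
        request.cls.db = DummyDB()

    # content of test_unittest_db.py (sketch)
    import unittest
    import pytest

    @pytest.mark.usefixtures("db_class")
    class MyTest(unittest.TestCase):
        def test_method1(self):
            assert hasattr(self, "db")
            assert 0, self.db # fail for demo purposes

        def test_method2(self):
            assert 0, self.db # fail for demo purposes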
setup.py

@@ -24,7 +24,7 @@ def main():
        name='pytest',
        description='py.test: simple powerful testing with Python',
        long_description = long_description,
        version='2.3.0.dev28',
        version='2.3.0.dev29',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -284,6 +284,25 @@ class TestFunctional:
        assert l[0].args == ("pos0",)
        assert l[1].args == ("pos1",)

    def test_keywords_at_node_level(self, testdir):
        p = testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="session", autouse=True)
            def some(request):
                request.keywords["hello"] = 42
                assert "world" not in request.keywords

            @pytest.fixture(scope="function", autouse=True)
            def funcsetup(request):
                assert "world" in request.keywords
                assert "hello" in request.keywords

            @pytest.mark.world
            def test_function():
                pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

class TestKeywordSelection:
    def test_select_simple(self, testdir):
@@ -1805,7 +1805,7 @@ def test_issue117_sessionscopeteardown(testdir):
    """)
    result = testdir.runpytest()
    assert result.ret != 0
    result.stderr.fnmatch_lines([
    result.stdout.fnmatch_lines([
        "*3/x*",
        "*ZeroDivisionError*",
    ])