path: root/python/helpers/pydev/pydev_runfiles_pytest2.py
import pickle
import zlib
import base64
import os
import py
from py._code import code  # @UnresolvedImport
import pydev_runfiles_xml_rpc
from pydevd_file_utils import _NormFile
import pytest
import sys
import time



#===================================================================================================
# Load the accept filter (the tests that should run; anything not listed is skipped)
#===================================================================================================
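# The PYDEV_PYTEST_SKIP value decoded below is expected to be a dict mapping normalized file
# paths to the accepted test names in that file, built on the launching side as the inverse of
# the decoding, e.g. (hypothetical paths and names):
#     filters = {'/path/to/test_module.py': ['TestCase.test_met1', 'test_func']}
#     os.environ['PYDEV_PYTEST_SKIP'] = base64.b64encode(zlib.compress(pickle.dumps(filters)))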
py_test_accept_filter = None

def _load_filters():
    global py_test_accept_filter
    if py_test_accept_filter is None:
        py_test_accept_filter = os.environ.get('PYDEV_PYTEST_SKIP')
        if py_test_accept_filter:
            py_test_accept_filter = pickle.loads(zlib.decompress(base64.b64decode(py_test_accept_filter)))
        else:
            py_test_accept_filter = {}


def connect_to_server_for_communication_to_xml_rpc_on_xdist():
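    # Under pytest-xdist the tests run in worker processes whose pid differs from the
    # PYDEV_MAIN_PID recorded by the launching process, so each worker opens its own
    # xml-rpc connection back to the PyDev test server on the PYDEV_PYTEST_SERVER port.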
    main_pid = os.environ.get('PYDEV_MAIN_PID')
    if main_pid and main_pid != str(os.getpid()):
        port = os.environ.get('PYDEV_PYTEST_SERVER')
        if not port:
            sys.stderr.write('Error: no PYDEV_PYTEST_SERVER environment variable defined.\n')
        else:
            pydev_runfiles_xml_rpc.InitializeServer(int(port), daemon=True)


#===================================================================================================
# Mocking to get clickable file representations
#===================================================================================================
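# The patched toterminal below prints each failure location as, e.g.:
#     File "/home/user/project/test_module.py", line 42
#     AssertionError: ...
# a format that PyDev and most editors can turn into a clickable link.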
def _MockFileRepresentation():
    code.ReprFileLocation._original_toterminal = code.ReprFileLocation.toterminal

    def toterminal(self, tw):
        # filename and lineno output for each entry,
        # using an output format that most editors understand
        msg = self.message
        i = msg.find("\n")
        if i != -1:
            msg = msg[:i]

        tw.line('File "%s", line %s\n%s' %(os.path.abspath(self.path), self.lineno, msg))

    code.ReprFileLocation.toterminal = toterminal


def _UninstallMockFileRepresentation():
    code.ReprFileLocation.toterminal = code.ReprFileLocation._original_toterminal #@UndefinedVariable


class State:
    numcollected = 0
    start_time = time.time()


def pytest_configure(*args, **kwargs):
    _MockFileRepresentation()


def pytest_collectreport(report):
    i = 0
    for x in report.result:
        if isinstance(x, pytest.Item):
            try:
                # Call our setup (which may do a skip, in which
                # case we won't count it).
                pytest_runtest_setup(x)
                i += 1
            except:
                continue
    State.numcollected += i


def pytest_collection_modifyitems():
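    # After collection: tell the PyDev runner how many test items were counted in
    # pytest_collectreport and reset the counter.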
    connect_to_server_for_communication_to_xml_rpc_on_xdist()
    pydev_runfiles_xml_rpc.notifyTestsCollected(State.numcollected)
    State.numcollected = 0


def pytest_unconfigure(*args, **kwargs):
    _UninstallMockFileRepresentation()
    pydev_runfiles_xml_rpc.notifyTestRunFinished('Finished in: %.2f secs.' % (time.time() - State.start_time,))


def pytest_runtest_setup(item):
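    # Note: a second pytest_runtest_setup (marked with pytest.mark.tryfirst) is defined
    # further down in this module; as both share the same name, that later definition is
    # the one pytest actually registers (and the one pytest_collectreport calls above).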
    filename = item.fspath.strpath
    test = item.location[2]
    State.start_test_time = time.time()

    pydev_runfiles_xml_rpc.notifyStartTest(filename, test)


def report_test(cond, filename, test, captured_output, error_contents, delta):
    '''
    @param cond: 'ok', 'fail' or 'error'
    @param filename: 'D:\\src\\mod1\\hello.py'
    @param test: 'TestCase.testMet1'
    @param delta: test duration in seconds
    '''
    time_str = '%.2f' % (delta,)
    pydev_runfiles_xml_rpc.notifyTest(cond, captured_output, error_contents, filename, test, time_str)


def pytest_runtest_makereport(item, call):
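    # Status reported to the PyDev runner, as computed below:
    #   passed or skipped in setup/teardown -> not reported
    #   failed in setup/teardown            -> 'error'
    #   failed in call                      -> 'fail'
    #   skipped in call                     -> 'skip' (skips are not reported)
    #   passed in call                      -> 'ok'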
    report_when = call.when
    report_duration = call.stop-call.start
    excinfo = call.excinfo

    if not call.excinfo:
        report_outcome = "passed"
        report_longrepr = None
    else:
        if not isinstance(excinfo, py.code.ExceptionInfo):
            report_outcome = "failed"
            report_longrepr = excinfo

        elif excinfo.errisinstance(py.test.skip.Exception):
            report_outcome = "skipped"
            r = excinfo._getreprcrash()
            report_longrepr = None #(str(r.path), r.lineno, r.message)

        else:
            report_outcome = "failed"
            if call.when == "call":
                report_longrepr = item.repr_failure(excinfo)

            else: # exception in setup or teardown
                report_longrepr = item._repr_failure_py(excinfo, style=item.config.option.tbstyle)

    filename = item.fspath.strpath
    test = item.location[2]

    status = 'ok'
    captured_output = ''
    error_contents = ''

    if report_outcome in ('passed', 'skipped'):
        # Passed or skipped: only report the actual test call (nothing to report for setup/teardown).
        if report_when in ('setup', 'teardown'):
            return
        if report_outcome == 'skipped':
            status = 'skip'

    else:
        # pytest outcomes are only passed, skipped and failed (there is no 'error'),
        # so a failure outside of the call phase is considered an error.
        if report_when in ('setup', 'teardown'):
            if status == 'ok':
                status = 'error'

        else:
            # Any failure in the call itself (not in setup or teardown) is a regular failure.
            status = 'fail'


    if call.excinfo:
        rep = report_longrepr
        if hasattr(rep, 'reprcrash'):
            reprcrash = rep.reprcrash
            error_contents += str(reprcrash)
            error_contents += '\n'

        if hasattr(rep, 'reprtraceback'):
            error_contents += str(rep.reprtraceback)

        if hasattr(rep, 'sections'):
            for name, content, sep in rep.sections:
                error_contents += sep * 40
                error_contents += name
                error_contents += sep * 40
                error_contents += '\n'
                error_contents += content
                error_contents += '\n'

    if status != 'skip': # i.e.: don't even report skips...
        report_test(status, filename, test, captured_output, error_contents, report_duration)



@pytest.mark.tryfirst
def pytest_runtest_setup(item):
    '''
    Skips tests that are not in the accept filter. With xdist this runs on a secondary (worker) process.
    '''
    _load_filters()
    if not py_test_accept_filter:
        return #Keep on going (nothing to filter)

    f = _NormFile(str(item.parent.fspath))
    name = item.name

    if f not in py_test_accept_filter:
        pytest.skip() # Skip the file

    accept_tests = py_test_accept_filter[f]

    if item.cls is not None:
        class_name = item.cls.__name__
    else:
        class_name = None
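    # A collected test is accepted when the filter lists its bare name, its
    # 'ClassName.name' form, or just its class name (which accepts every test in the class).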
    for test in accept_tests:
        if test == name:
            #Direct match of the test (just go on with the default loading)
            return

        if class_name is not None:
            if test == class_name + '.' + name:
                return

            if class_name == test:
                return

    # If we had a match it'd have returned already.
    pytest.skip() # Skip the test