diff --git a/.editorconfig b/.editorconfig index f560af744..679ae499c 100644 --- a/.editorconfig +++ b/.editorconfig @@ -18,6 +18,9 @@ trim_trailing_whitespace = true [*.py] max_line_length = 100 +[*.pyi] +max_line_length = 100 + [*.c] max_line_length = 100 @@ -30,6 +33,12 @@ indent_size = 2 [*.rst] max_line_length = 79 +[*.tok] +trim_trailing_whitespace = false + +[*_dos.tok] +end_of_line = crlf + [Makefile] indent_style = tab indent_size = 8 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000..7e1f430d3 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,14 @@ +# Commits to ignore when doing git-blame. + +# 2023-01-05 style: use good style for annotated defaults parameters +78444f4c06df6a634fa67dd99ee7c07b6b633d9e + +# 2023-01-06 style(perf): blacken lab/benchmark.py +bf6c12f5da54db7c5c0cc47cbf22c70f686e8236 + +# 2023-03-22 style: use double-quotes +16abd82b6e87753184e8308c4b2606ff3979f8d3 +b7be64538aa480fce641349d3053e9a84862d571 + +# 2023-04-01 style: use double-quotes in JavaScript +b03ab92bae24c54f1d5a98baa3af6b9a18de4d36 diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..6a81b3084 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,8 @@ +# Treat each other well + +Everyone participating in the coverage.py project, and in particular in the +issue tracker, pull requests, and social media activity, is expected to treat +other people with respect and to follow the guidelines articulated in the +[Python Community Code of Conduct][psf_coc]. + +[psf_coc]: https://www.python.org/psf/codeofconduct/ diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 67393a8ca..71b32f2fa 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -16,7 +16,7 @@ How can we reproduce the problem? Please *be specific*. Don't link to a failing 1. What version of coverage.py shows the problem? The output of `coverage debug sys` is helpful. 1. What versions of what packages do you have installed? The output of `pip freeze` is helpful. 1. What code shows the problem? Give us a specific commit of a specific repo that we can check out. If you've already worked around the problem, please provide a commit before that fix. -1. What commands did you run? +1. What commands should we run to reproduce the problem? *Be specific*. Include everything, even `git clone`, `pip install`, and so on. Explain like we're five! **Expected behavior** A clear and concise description of what you expected to happen. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 433310b17..5c7bfc9d2 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -8,9 +8,6 @@ contact_links: - name: Frequently Asked Questions url: https://coverage.readthedocs.io/en/latest/faq.html about: Some common problems are described here. - - name: Testing in Python mailing list - url: http://lists.idyll.org/listinfo/testing-in-python - about: Ask questions about using coverage.py here. - name: Tidelift security contact url: https://tidelift.com/security about: Please report security vulnerabilities here. diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 000000000..005467cec --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,4 @@ +# Security Disclosures + +To report a security vulnerability, please use the [Tidelift security contact](https://tidelift.com/security). 
+Tidelift will coordinate the fix and disclosure with maintainers. diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml deleted file mode 100644 index f25aaec0b..000000000 --- a/.github/workflows/cancel.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 -# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt - -# This action finds in-progress Action jobs for the same branch, and cancels -# them. There's little point in continuing to run superseded jobs. - -name: "Cancel" - -on: - push: - -permissions: - contents: read - -jobs: - cancel: - permissions: - actions: write # for styfle/cancel-workflow-action to cancel/stop running workflows - runs-on: ubuntu-latest - steps: - - name: "Cancel Previous Runs" - uses: styfle/cancel-workflow-action@0.10.0 - with: - access_token: ${{ github.token }} - workflow_id: all diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b1d24b6f3..ad316eb4d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -22,6 +22,9 @@ on: schedule: - cron: '30 20 * * 6' +permissions: + contents: read + jobs: analyze: name: Analyze diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 00d81159a..ab94a83e3 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -18,20 +18,28 @@ defaults: env: PIP_DISABLE_PIP_VERSION_CHECK: 1 + FORCE_COLOR: 1 # Get colored pytest output + +permissions: + contents: read + +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true jobs: coverage: - name: "Python ${{ matrix.python-version }} on ${{ matrix.os }}" - runs-on: "${{ matrix.os }}" + name: "${{ matrix.python-version }} on ${{ matrix.os }}" + runs-on: "${{ matrix.os }}-latest" strategy: matrix: os: - - ubuntu-latest - - macos-latest - - windows-latest + - ubuntu + - macos + - windows python-version: - # When changing this list, be sure to check the [gh-actions] list in + # When changing this list, be sure to check the [gh] list in # tox.ini so that tox will run properly. PYVERSIONS # Available versions: # https://github.com/actions/python-versions/blob/main/versions-manifest.json @@ -39,15 +47,26 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.0-beta.3" + - "3.11" + - "3.12" - "pypy-3.7" + - "pypy-3.8" + - "pypy-3.9" exclude: # Windows PyPy doesn't seem to work? - os: windows-latest python-version: "pypy-3.7" + - os: windows-latest + python-version: "pypy-3.8" + - os: windows-latest + python-version: "pypy-3.9" # Mac PyPy always takes the longest, and doesn't add anything. - os: macos-latest python-version: "pypy-3.7" + - os: macos-latest + python-version: "pypy-3.8" + - os: macos-latest + python-version: "pypy-3.9" # If one job fails, stop the whole thing. 
fail-fast: true @@ -59,6 +78,7 @@ jobs: uses: "actions/setup-python@v4" with: python-version: "${{ matrix.python-version }}" + allow-prereleases: true cache: pip cache-dependency-path: 'requirements/*.pip' @@ -77,6 +97,13 @@ jobs: set -xe python -m tox + - name: "Combine data" + env: + COVERAGE_RCFILE: "metacov.ini" + run: | + python -m coverage combine + mv .metacov .metacov.${{ matrix.python-version }}.${{ matrix.os }} + - name: "Upload coverage data" uses: actions/upload-artifact@v3 with: @@ -87,6 +114,10 @@ jobs: name: "Combine coverage data" needs: coverage runs-on: ubuntu-latest + outputs: + total: ${{ steps.total.outputs.total }} + env: + COVERAGE_RCFILE: "metacov.ini" steps: - name: "Check out the repo" @@ -95,7 +126,7 @@ jobs: - name: "Set up Python" uses: "actions/setup-python@v4" with: - python-version: "3.8" + python-version: "3.7" # Minimum of PYVERSIONS cache: pip cache-dependency-path: 'requirements/*.pip' @@ -115,13 +146,10 @@ jobs: - name: "Combine and report" id: combine env: - COVERAGE_RCFILE: "metacov.ini" - COVERAGE_METAFILE: ".metacov" COVERAGE_CONTEXT: "yes" run: | set -xe - python -m igor combine_html - python -m coverage json + python igor.py combine_html - name: "Upload HTML report" uses: actions/upload-artifact@v3 @@ -129,11 +157,10 @@ jobs: name: html_report path: htmlcov - - name: "Upload JSON report" - uses: actions/upload-artifact@v3 - with: - name: json_report - path: coverage.json + - name: "Get total" + id: total + run: | + echo "total=$(python -m coverage report --format=total)" >> $GITHUB_OUTPUT publish: name: "Publish coverage report" @@ -141,50 +168,48 @@ jobs: runs-on: ubuntu-latest steps: - - name: "Checkout reports repo" - run: | - set -xe - git clone --depth=1 --no-checkout https://${{ secrets.COVERAGE_REPORTS_TOKEN }}@github.com/nedbat/coverage-reports reports_repo - cd reports_repo - git sparse-checkout init --cone - git sparse-checkout set --skip-checks '/*' '!/reports' - git config user.name nedbat - git config user.email ned@nedbatchelder.com - git checkout main - - - name: "Download coverage JSON report" - uses: actions/download-artifact@v3 - with: - name: json_report - - name: "Compute info for later steps" id: info run: | set -xe - export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])") - export PCTINT=$(echo $TOTAL | cut -f1 -d.) 
export SHA10=$(echo ${{ github.sha }} | cut -c 1-10) export SLUG=$(date +'%Y%m%d')_$SHA10 export REPORT_DIR=reports/$SLUG/htmlcov export REF="${{ github.ref }}" - echo "total=$TOTAL" >> $GITHUB_ENV + echo "total=${{ needs.combine.outputs.total }}" >> $GITHUB_ENV echo "sha10=$SHA10" >> $GITHUB_ENV echo "slug=$SLUG" >> $GITHUB_ENV echo "report_dir=$REPORT_DIR" >> $GITHUB_ENV - echo "url=https://nedbat.github.io/coverage-reports/$REPORT_DIR" >> $GITHUB_ENV + echo "url=https://htmlpreview.github.io/?https://github.com/nedbat/coverage-reports/blob/main/reports/$SLUG/htmlcov/index.html" >> $GITHUB_ENV echo "branch=${REF#refs/heads/}" >> $GITHUB_ENV - if (($PCTINT >= 85)); then echo "badge_color=green"; fi >> $GITHUB_ENV - if (($PCTINT < 85)); then echo "badge_color=yellow"; fi >> $GITHUB_ENV - if (($PCTINT < 70)); then echo "badge_color=orange"; fi >> $GITHUB_ENV - if (($PCTINT < 60)); then echo "badge_color=red"; fi >> $GITHUB_ENV + + - name: "Summarize" + run: | + echo '### Total coverage: ${{ env.total }}%' >> $GITHUB_STEP_SUMMARY + + - name: "Checkout reports repo" + if: ${{ github.ref == 'refs/heads/master' }} + run: | + set -xe + git clone --depth=1 --no-checkout https://${{ secrets.COVERAGE_REPORTS_TOKEN }}@github.com/nedbat/coverage-reports reports_repo + cd reports_repo + git sparse-checkout init --cone + git sparse-checkout set --skip-checks '/*' '!/reports' + git config user.name nedbat + git config user.email ned@nedbatchelder.com + git checkout main - name: "Download coverage HTML report" + if: ${{ github.ref == 'refs/heads/master' }} uses: actions/download-artifact@v3 with: name: html_report path: reports_repo/${{ env.report_dir }} - name: "Push to report repo" + if: ${{ github.ref == 'refs/heads/master' }} + env: + COMMIT_MESSAGE: ${{ github.event.head_commit.message }} run: | set -xe # Make the redirect to the latest report. @@ -192,7 +217,9 @@ jobs: echo "" >> reports_repo/latest.html echo "
Coverage report redirect..." >> reports_repo/latest.html # Make the commit message. - echo "${{ env.total }}% - ${{ github.event.head_commit.message }}" > commit.txt + echo "${{ env.total }}% - $COMMIT_MESSAGE" > commit.txt + echo "" >> commit.txt + echo "[View the report](${{ env.url }})" >> commit.txt echo "" >> commit.txt echo "${{ env.url }}" >> commit.txt echo "${{ env.sha10 }}: ${{ env.branch }}" >> commit.txt @@ -203,19 +230,18 @@ jobs: git add ${{ env.report_dir }} latest.html git commit --file=../commit.txt git push + echo '[${{ env.url }}](${{ env.url }})' >> $GITHUB_STEP_SUMMARY - name: "Create badge" + if: ${{ github.ref == 'refs/heads/master' }} # https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5 - uses: schneegans/dynamic-badges-action@v1.4.0 + uses: schneegans/dynamic-badges-action@5d424ad4060f866e4d1dab8f8da0456e6b1c4f56 with: auth: ${{ secrets.METACOV_GIST_SECRET }} gistID: 8c6980f77988a327348f9b02bbaf67f5 filename: metacov.json label: Coverage message: ${{ env.total }}% - color: ${{ env.badge_color }} - - - name: "Create summary" - run: | - echo '### Total coverage: ${{ env.total }}%' >> $GITHUB_STEP_SUMMARY - echo '[${{ env.url }}](${{ env.url }})' >> $GITHUB_STEP_SUMMARY + minColorRange: 60 + maxColorRange: 95 + valColorRange: ${{ env.total }} diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 845c763e8..943a4b57c 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -4,8 +4,15 @@ # # Source repository: https://github.com/actions/dependency-review-action # Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement + name: 'Dependency Review' -on: [pull_request] +on: + push: + branches: + - master + - nedbat/* + pull_request: + workflow_dispatch: permissions: contents: read @@ -17,4 +24,7 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@v3 - name: 'Dependency Review' - uses: actions/dependency-review-action@v2 + uses: actions/dependency-review-action@v3 + with: + base-ref: ${{ github.event.pull_request.base.sha || 'master' }} + head-ref: ${{ github.event.pull_request.head.sha || github.ref }} diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml index fcce04c66..53e081455 100644 --- a/.github/workflows/kit.yml +++ b/.github/workflows/kit.yml @@ -41,9 +41,13 @@ env: permissions: contents: read +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + jobs: wheels: - name: "Build ${{ matrix.os }} ${{ matrix.py }} ${{ matrix.arch }} wheels" + name: "${{ matrix.py }} ${{ matrix.os }} ${{ matrix.arch }} wheels" runs-on: ${{ matrix.os }}-latest strategy: matrix: @@ -73,11 +77,12 @@ jobs: # } # # PYVERSIONS. Available versions: # # https://github.com/actions/python-versions/blob/main/versions-manifest.json - # pys = ["cp37", "cp38", "cp39", "cp310"] + # # PyPy versions are handled further below in the "pypy" step. 
+ # pys = ["cp37", "cp38", "cp39", "cp310", "cp311", "cp312"] # # # Some OS/arch combinations need overrides for the Python versions: # os_arch_pys = { - # ("macos", "arm64"): ["cp38", "cp39", "cp310"], + # ("macos", "arm64"): ["cp38", "cp39", "cp310", "cp311"], # } # # #----- ^^^ ---------------------- ^^^ ----- @@ -97,36 +102,49 @@ jobs: - {"os": "ubuntu", "py": "cp38", "arch": "x86_64"} - {"os": "ubuntu", "py": "cp39", "arch": "x86_64"} - {"os": "ubuntu", "py": "cp310", "arch": "x86_64"} + - {"os": "ubuntu", "py": "cp311", "arch": "x86_64"} + - {"os": "ubuntu", "py": "cp312", "arch": "x86_64"} - {"os": "ubuntu", "py": "cp37", "arch": "i686"} - {"os": "ubuntu", "py": "cp38", "arch": "i686"} - {"os": "ubuntu", "py": "cp39", "arch": "i686"} - {"os": "ubuntu", "py": "cp310", "arch": "i686"} + - {"os": "ubuntu", "py": "cp311", "arch": "i686"} + - {"os": "ubuntu", "py": "cp312", "arch": "i686"} - {"os": "ubuntu", "py": "cp37", "arch": "aarch64"} - {"os": "ubuntu", "py": "cp38", "arch": "aarch64"} - {"os": "ubuntu", "py": "cp39", "arch": "aarch64"} - {"os": "ubuntu", "py": "cp310", "arch": "aarch64"} + - {"os": "ubuntu", "py": "cp311", "arch": "aarch64"} + - {"os": "ubuntu", "py": "cp312", "arch": "aarch64"} - {"os": "macos", "py": "cp38", "arch": "arm64"} - {"os": "macos", "py": "cp39", "arch": "arm64"} - {"os": "macos", "py": "cp310", "arch": "arm64"} + - {"os": "macos", "py": "cp311", "arch": "arm64"} - {"os": "macos", "py": "cp37", "arch": "x86_64"} - {"os": "macos", "py": "cp38", "arch": "x86_64"} - {"os": "macos", "py": "cp39", "arch": "x86_64"} - {"os": "macos", "py": "cp310", "arch": "x86_64"} + - {"os": "macos", "py": "cp311", "arch": "x86_64"} + - {"os": "macos", "py": "cp312", "arch": "x86_64"} - {"os": "windows", "py": "cp37", "arch": "x86"} - {"os": "windows", "py": "cp38", "arch": "x86"} - {"os": "windows", "py": "cp39", "arch": "x86"} - {"os": "windows", "py": "cp310", "arch": "x86"} + - {"os": "windows", "py": "cp311", "arch": "x86"} + - {"os": "windows", "py": "cp312", "arch": "x86"} - {"os": "windows", "py": "cp37", "arch": "AMD64"} - {"os": "windows", "py": "cp38", "arch": "AMD64"} - {"os": "windows", "py": "cp39", "arch": "AMD64"} - {"os": "windows", "py": "cp310", "arch": "AMD64"} - # [[[end]]] (checksum: 4b02126e18e2b3798783d3cc6f303552) + - {"os": "windows", "py": "cp311", "arch": "AMD64"} + - {"os": "windows", "py": "cp312", "arch": "AMD64"} + # [[[end]]] (checksum: 5e62f362263935c1e3a21299f8a1b649) fail-fast: false steps: - - name: Setup QEMU + - name: "Setup QEMU" if: matrix.os == 'ubuntu' - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 with: platforms: arm64 @@ -136,6 +154,7 @@ jobs: - name: "Install Python 3.8" uses: actions/setup-python@v4 with: + # PYVERSIONS python-version: "3.8" cache: pip cache-dependency-path: 'requirements/*.pip' @@ -149,6 +168,7 @@ jobs: CIBW_BUILD: ${{ matrix.py }}-* CIBW_ARCHS: ${{ matrix.arch }} CIBW_ENVIRONMENT: PIP_DISABLE_PIP_VERSION_CHECK=1 + CIBW_PRERELEASE_PYTHONS: True CIBW_TEST_COMMAND: python -c "from coverage.tracer import CTracer; print('CTracer OK!')" run: | python -m cibuildwheel --output-dir wheelhouse @@ -162,9 +182,10 @@ jobs: with: name: dist path: wheelhouse/*.whl + retention-days: 7 sdist: - name: "Build source distribution" + name: "Source distribution" runs-on: ubuntu-latest steps: - name: "Check out the repo" @@ -173,6 +194,7 @@ jobs: - name: "Install Python 3.8" uses: actions/setup-python@v4 with: + # PYVERSIONS python-version: "3.8" cache: pip 
cache-dependency-path: 'requirements/*.pip' @@ -194,9 +216,10 @@ jobs: with: name: dist path: dist/*.tar.gz + retention-days: 7 pypy: - name: "Build PyPy wheel" + name: "PyPy wheel" runs-on: ubuntu-latest steps: - name: "Check out the repo" @@ -205,8 +228,7 @@ jobs: - name: "Install PyPy" uses: actions/setup-python@v4 with: - # PYVERSIONS - python-version: "pypy-3.7" + python-version: "pypy-3.7" # Minimum of PyPy PYVERSIONS cache: pip cache-dependency-path: 'requirements/*.pip' @@ -216,9 +238,9 @@ jobs: - name: "Build wheel" run: | - # One wheel works for all PyPy versions. + # One wheel works for all PyPy versions. PYVERSIONS # yes, this is weird syntax: https://github.com/pypa/build/issues/202 - pypy3 -m build -w -C="--global-option=--python-tag" -C="--global-option=pp36.pp37.pp38" + pypy3 -m build -w -C="--global-option=--python-tag" -C="--global-option=pp37.pp38.pp39" - name: "List wheels" run: | @@ -229,3 +251,40 @@ jobs: with: name: dist path: dist/*.whl + retention-days: 7 + + sign: + # This signs our artifacts, but we don't use the signatures for anything + # yet. Someday maybe PyPI will have a way to upload and verify them. + name: "Sign artifacts" + needs: + - wheels + - sdist + - pypy + runs-on: ubuntu-latest + permissions: + id-token: write + steps: + - name: "Download artifacts" + uses: actions/download-artifact@v3 + with: + name: dist + + - name: "Sign artifacts" + uses: sigstore/gh-action-sigstore-python@v1.2.3 + with: + inputs: coverage-*.* + + - name: "List files" + run: | + ls -alR + + - name: "Upload signatures" + uses: actions/upload-artifact@v3 + with: + name: signatures + path: | + *.crt + *.sig + *.sigstore + retention-days: 7 diff --git a/.github/workflows/python-nightly.yml b/.github/workflows/python-nightly.yml index 6aec3b8b9..319064c94 100644 --- a/.github/workflows/python-nightly.yml +++ b/.github/workflows/python-nightly.yml @@ -22,25 +22,38 @@ env: PIP_DISABLE_PIP_VERSION_CHECK: 1 COVERAGE_IGOR_VERBOSE: 1 +permissions: + contents: read + +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + jobs: tests: - name: "Python ${{ matrix.python-version }}" - runs-on: ubuntu-latest + name: "${{ matrix.python-version }}" + # Choose a recent Ubuntu that deadsnakes still builds all the versions for. + # For example, deadsnakes doesn't provide 3.10 nightly for 22.04 (jammy) + # because jammy ships 3.10, and deadsnakes doesn't want to clobber it. + # https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages + # https://github.com/deadsnakes/issues/issues/234 + runs-on: ubuntu-20.04 strategy: matrix: python-version: - # When changing this list, be sure to check the [gh-actions] list in + # When changing this list, be sure to check the [gh] list in # tox.ini so that tox will run properly. 
PYVERSIONS # Available versions: # https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages - - "3.9-dev" - "3.10-dev" - "3.11-dev" + - "3.12-dev" # https://github.com/actions/setup-python#available-versions-of-pypy - "pypy-3.7-nightly" - "pypy-3.8-nightly" - "pypy-3.9-nightly" + - "pypy-3.10-nightly" fail-fast: false steps: @@ -48,7 +61,7 @@ jobs: uses: "actions/checkout@v3" - name: "Install ${{ matrix.python-version }} with deadsnakes" - uses: "deadsnakes/action@v2.1.1" + uses: deadsnakes/action@e3117c2981fd8afe4af79f3e1be80066c82b70f5 if: "!startsWith(matrix.python-version, 'pypy-')" with: python-version: "${{ matrix.python-version }}" diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 678e8310e..9ee690df9 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -21,6 +21,10 @@ env: permissions: contents: read +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + jobs: lint: name: "Pylint etc" @@ -42,15 +46,37 @@ jobs: - name: "Install dependencies" run: | - set -xe - python -VV - python -m site python -m pip install -r requirements/tox.pip - name: "Tox lint" run: | python -m tox -e lint + mypy: + name: "Check types" + runs-on: ubuntu-latest + + steps: + - name: "Check out the repo" + uses: "actions/checkout@v3" + + - name: "Install Python" + uses: "actions/setup-python@v4" + with: + python-version: "3.8" # Minimum of PYVERSIONS, but at least 3.8 + cache: pip + cache-dependency-path: 'requirements/*.pip' + + - name: "Install dependencies" + run: | + # We run on 3.8, but the pins were made on 3.7, so don't insist on + # hashes, which won't match. + python -m pip install -r requirements/tox.pip + + - name: "Tox mypy" + run: | + python -m tox -e mypy + doc: name: "Build docs" runs-on: ubuntu-latest diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml index 23c30e3c9..8ab3608bc 100644 --- a/.github/workflows/testsuite.yml +++ b/.github/workflows/testsuite.yml @@ -18,36 +18,44 @@ defaults: env: PIP_DISABLE_PIP_VERSION_CHECK: 1 COVERAGE_IGOR_VERBOSE: 1 + FORCE_COLOR: 1 # Get colored pytest output permissions: contents: read +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + jobs: tests: - name: "Python ${{ matrix.python-version }} on ${{ matrix.os }}" - runs-on: "${{ matrix.os }}" + name: "${{ matrix.python-version }} on ${{ matrix.os }}" + runs-on: "${{ matrix.os }}-latest" strategy: matrix: os: - - ubuntu-latest - - macos-latest - - windows-latest + - ubuntu + - macos + - windows python-version: - # When changing this list, be sure to check the [gh-actions] list in + # When changing this list, be sure to check the [gh] list in # tox.ini so that tox will run properly. PYVERSIONS # Available versions: # https://github.com/actions/python-versions/blob/main/versions-manifest.json + # https://github.com/actions/setup-python/blob/main/docs/advanced-usage.md#available-versions-of-python-and-pypy - "3.7" - "3.8" - "3.9" - "3.10" - - "3.11.0-beta.3" + - "3.11" + - "3.12" - "pypy-3.7" + - "pypy-3.9" exclude: - # Windows PyPy doesn't seem to work? - - os: windows-latest - python-version: "pypy-3.7" + # Windows PyPy-3.9 always gets killed. + - os: windows + python-version: "pypy-3.9" fail-fast: false steps: @@ -58,7 +66,7 @@ jobs: uses: "actions/setup-python@v4" with: python-version: "${{ matrix.python-version }}" - # Can we start using the pip cache again? 
+ allow-prereleases: true cache: pip cache-dependency-path: 'requirements/*.pip' @@ -72,20 +80,26 @@ jobs: # python -c "import urllib.request as r; exec(r.urlopen('https://bit.ly/pydoctor').read())" - name: "Run tox for ${{ matrix.python-version }}" - continue-on-error: true - id: tox1 run: | python -m tox -- -rfsEX - name: "Retry tox for ${{ matrix.python-version }}" - id: tox2 - if: steps.tox1.outcome == 'failure' + if: failure() run: | - python -m tox -- -rfsEX + # `exit 1` makes sure that the job remains red with flaky runs + python -m tox -- -rfsEX --lf -vvvvv && exit 1 - - name: "Set status" - if: always() - run: | - if ${{ steps.tox1.outcome != 'success' && steps.tox2.outcome != 'success' }}; then - exit 1 - fi + # This job aggregates test results. It's the required check for branch protection. + # https://github.com/marketplace/actions/alls-green#why + # https://github.com/orgs/community/discussions/33579 + success: + name: Tests successful + if: always() + needs: + - tests + runs-on: ubuntu-latest + steps: + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe + with: + jobs: ${{ toJSON(needs) }} diff --git a/.gitignore b/.gitignore index 2373d5dc7..a49767e77 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ coverage.json # Stuff in the root. build *.egg-info +cheats.txt dist htmlcov MANIFEST diff --git a/.readthedocs.yml b/.readthedocs.yml index 8c96c02fd..48d6b434d 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -17,6 +17,7 @@ formats: - pdf python: + # PYVERSIONS version: 3.7 install: - requirements: doc/requirements.pip diff --git a/.treerc b/.treerc index ddea2e92c..0916e24a9 100644 --- a/.treerc +++ b/.treerc @@ -14,5 +14,5 @@ ignore = *.gz *.zip _build _spell *.egg *.egg-info - .mypy_cache + .*_cache tmp diff --git a/CHANGES.rst b/CHANGES.rst index fdedc7aaa..4b567d6dc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -17,23 +17,553 @@ development at the same time, such as 4.5.x and 5.0. .. Version 9.8.1 — 2027-07-27 .. -------------------------- +.. scriv-start-here + +.. _changes_7-2-7: + +Version 7.2.7 — 2023-05-29 +-------------------------- + +- Fix: reverted a `change from 6.4.3"
- self.text = text
- if not self.text:
+ if text is not None:
+ self.text: str = text
+ else:
from coverage.python import get_python_source
try:
self.text = get_python_source(self.filename)
@@ -46,42 +60,45 @@ def __init__(self, text=None, filename=None, exclude=None):
self.exclude = exclude
# The text lines of the parsed code.
- self.lines = self.text.split('\n')
+ self.lines: List[str] = self.text.split("\n")
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
# lines.
- self.statements = set()
+ self.statements: Set[TLineNo] = set()
# The normalized line numbers of the excluded lines in the code,
# adjusted to their first lines.
- self.excluded = set()
+ self.excluded: Set[TLineNo] = set()
# The raw_* attributes are only used in this class, and in
# lab/parser.py to show how this class is working.
# The line numbers that start statements, as reported by the line
# number table in the bytecode.
- self.raw_statements = set()
+ self.raw_statements: Set[TLineNo] = set()
# The raw line numbers of excluded lines of code, as marked by pragmas.
- self.raw_excluded = set()
+ self.raw_excluded: Set[TLineNo] = set()
+
+ # The line numbers of class definitions.
+ self.raw_classdefs: Set[TLineNo] = set()
# The line numbers of docstring lines.
- self.raw_docstrings = set()
+ self.raw_docstrings: Set[TLineNo] = set()
# Internal detail, used by lab/parser.py.
self.show_tokens = False
# A dict mapping line numbers to lexical statement starts for
# multi-line statements.
- self._multiline = {}
+ self._multiline: Dict[TLineNo, TLineNo] = {}
# Lazily-created arc data, and missing arc descriptions.
- self._all_arcs = None
- self._missing_arc_fragments = None
+ self._all_arcs: Optional[Set[TArc]] = None
+ self._missing_arc_fragments: Optional[TArcFragments] = None
- def lines_matching(self, *regexes):
+ def lines_matching(self, *regexes: str) -> Set[TLineNo]:
"""Find the lines matching one of a list of regexes.
Returns a set of line numbers, the lines that contain a match for one
@@ -97,7 +114,7 @@ def lines_matching(self, *regexes):
matches.add(i)
return matches
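
# A minimal standalone sketch of the matching logic that lines_matching()
# implements. The source lines and the pragma regex below are made-up
# examples; coverage.py supplies the configured exclusion patterns.
import re

source_lines = ["x = 1", "if debug:  # pragma: no cover", "    print(x)"]
regexes = [r"#\s*pragma:\s*no cover"]
matches = set()
for i, line_text in enumerate(source_lines, start=1):
    if any(re.search(rx, line_text) for rx in regexes):
        matches.add(i)
print(matches)  # -> {2}
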
- def _raw_parse(self):
+ def _raw_parse(self) -> None:
"""Parse the source to find the interesting facts about its lines.
A handful of attributes are updated.
@@ -119,6 +136,7 @@ def _raw_parse(self):
first_on_line = True
nesting = 0
+ assert self.text is not None
tokgen = generate_tokens(self.text)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
if self.show_tokens: # pragma: debugging
@@ -130,8 +148,14 @@ def _raw_parse(self):
indent += 1
elif toktype == token.DEDENT:
indent -= 1
+ elif toktype == token.NAME:
+ if ttext == "class":
+ # Class definitions look like branches in the bytecode, so
+ # we need to exclude them. The simplest way is to note the
+ # lines with the "class" keyword.
+ self.raw_classdefs.add(slineno)
elif toktype == token.OP:
- if ttext == ':' and nesting == 0:
+ if ttext == ":" and nesting == 0:
should_exclude = (elineno in self.raw_excluded) or excluding_decorators
if not excluding and should_exclude:
# Start excluding a suite. We trigger off of the colon
@@ -141,7 +165,7 @@ def _raw_parse(self):
exclude_indent = indent
excluding = True
excluding_decorators = False
- elif ttext == '@' and first_on_line:
+ elif ttext == "@" and first_on_line:
# A decorator.
if elineno in self.raw_excluded:
excluding_decorators = True
@@ -158,21 +182,20 @@ def _raw_parse(self):
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
- if first_line is not None and elineno != first_line:
+ if first_line is not None and elineno != first_line: # type: ignore[unreachable]
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
- for l in range(first_line, elineno+1):
+ for l in range(first_line, elineno+1): # type: ignore[unreachable]
self._multiline[l] = first_line
first_line = None
first_on_line = True
if ttext.strip() and toktype != tokenize.COMMENT:
- # A non-whitespace token.
+ # A non-white-space token.
empty = False
if first_line is None:
- # The token is not whitespace, and is the first in a
- # statement.
+ # The token is not white space, and is the first in a statement.
first_line = slineno
# Check whether to end an excluded suite.
if excluding and indent <= exclude_indent:
@@ -194,32 +217,32 @@ def _raw_parse(self):
if env.PYBEHAVIOR.module_firstline_1 and self._multiline:
self._multiline[1] = min(self.raw_statements)
- def first_line(self, line):
- """Return the first line number of the statement including `line`."""
- if line < 0:
- line = -self._multiline.get(-line, -line)
+ def first_line(self, lineno: TLineNo) -> TLineNo:
+ """Return the first line number of the statement including `lineno`."""
+ if lineno < 0:
+ lineno = -self._multiline.get(-lineno, -lineno)
else:
- line = self._multiline.get(line, line)
- return line
+ lineno = self._multiline.get(lineno, lineno)
+ return lineno
- def first_lines(self, lines):
- """Map the line numbers in `lines` to the correct first line of the
+ def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]:
+ """Map the line numbers in `linenos` to the correct first line of the
statement.
Returns a set of the first lines.
"""
- return {self.first_line(l) for l in lines}
+ return {self.first_line(l) for l in linenos}
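
# A minimal standalone sketch of the _multiline mapping that first_line() and
# first_lines() consult. The dict below is a made-up example for a single
# statement spanning lines 3-5.
_multiline = {3: 3, 4: 3, 5: 3}

def first_line(lineno):
    if lineno < 0:
        return -_multiline.get(-lineno, -lineno)
    return _multiline.get(lineno, lineno)

assert first_line(4) == 3    # middle of the multi-line statement -> its first line
assert first_line(9) == 9    # lines not in the map are already first lines
assert first_line(-5) == -3  # negative (exit) line numbers keep their sign
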
- def translate_lines(self, lines):
+ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
"""Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
- def translate_arcs(self, arcs):
+ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
"""Implement `FileReporter.translate_arcs`."""
- return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
+ return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs}
- def parse_source(self):
+ def parse_source(self) -> None:
"""Parse source text to find executable lines, excluded lines, etc.
Sets the .excluded and .statements attributes, normalized to the first
@@ -228,7 +251,7 @@ def parse_source(self):
"""
try:
self._raw_parse()
- except (tokenize.TokenError, IndentationError) as err:
+ except (tokenize.TokenError, IndentationError, SyntaxError) as err:
if hasattr(err, "lineno"):
lineno = err.lineno # IndentationError
else:
@@ -244,7 +267,7 @@ def parse_source(self):
starts = self.raw_statements - ignore
self.statements = self.first_lines(starts) - ignore
- def arcs(self):
+ def arcs(self) -> Set[TArc]:
"""Get information about the arcs available in the code.
Returns a set of line number pairs. Line numbers have been normalized
@@ -253,9 +276,10 @@ def arcs(self):
"""
if self._all_arcs is None:
self._analyze_ast()
+ assert self._all_arcs is not None
return self._all_arcs
- def _analyze_ast(self):
+ def _analyze_ast(self) -> None:
"""Run the AstArcAnalyzer and save its results.
`_all_arcs` is the set of arcs in the code.
@@ -273,13 +297,13 @@ def _analyze_ast(self):
self._missing_arc_fragments = aaa.missing_arc_fragments
- def exit_counts(self):
+ def exit_counts(self) -> Dict[TLineNo, int]:
"""Get a count of exits from that each line.
Excluded lines are excluded.
"""
- exit_counts = collections.defaultdict(int)
+ exit_counts: Dict[TLineNo, int] = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
@@ -292,12 +316,24 @@ def exit_counts(self):
continue
exit_counts[l1] += 1
+ # Class definitions have one extra exit, so remove one for each:
+ for l in self.raw_classdefs:
+ # Ensure key is there: class definitions can include excluded lines.
+ if l in exit_counts:
+ exit_counts[l] -= 1
+
return exit_counts
- def missing_arc_description(self, start, end, executed_arcs=None):
+ def missing_arc_description(
+ self,
+ start: TLineNo,
+ end: TLineNo,
+ executed_arcs: Optional[Iterable[TArc]] = None,
+ ) -> str:
"""Provide an English sentence describing a missing arc."""
if self._missing_arc_fragments is None:
self._analyze_ast()
+ assert self._missing_arc_fragments is not None
actual_start = start
@@ -337,31 +373,27 @@ def missing_arc_description(self, start, end, executed_arcs=None):
class ByteParser:
"""Parse bytecode to understand the structure of code."""
- @contract(text='unicode')
- def __init__(self, text, code=None, filename=None):
+ def __init__(
+ self,
+ text: str,
+ code: Optional[CodeType] = None,
+ filename: Optional[str] = None,
+ ) -> None:
self.text = text
- if code:
+ if code is not None:
self.code = code
else:
+ assert filename is not None
try:
- self.code = compile_unicode(text, filename, "exec")
+ self.code = compile(text, filename, "exec", dont_inherit=True)
except SyntaxError as synerr:
raise NotPython(
"Couldn't parse '%s' as Python source: '%s' at line %d" % (
- filename, synerr.msg, synerr.lineno
+ filename, synerr.msg, synerr.lineno or 0
)
) from synerr
- # Alternative Python implementations don't always provide all the
- # attributes on code objects that we need to do the analysis.
- for attr in ['co_lnotab', 'co_firstlineno']:
- if not hasattr(self.code, attr):
- raise _StopEverything( # pragma: only jython
- "This implementation of Python doesn't support code analysis.\n" +
- "Run coverage.py under another Python for this command."
- )
-
- def child_parsers(self):
+ def child_parsers(self) -> Iterable[ByteParser]:
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
@@ -369,7 +401,7 @@ def child_parsers(self):
"""
return (ByteParser(self.text, code=c) for c in code_objects(self.code))
- def _line_numbers(self):
+ def _line_numbers(self) -> Iterable[TLineNo]:
"""Yield the line numbers possible in this code object.
Uses co_lnotab described in Python/compile.c to find the
@@ -399,7 +431,7 @@ def _line_numbers(self):
if line_num != last_line_num:
yield line_num
- def _find_statements(self):
+ def _find_statements(self) -> Iterable[TLineNo]:
"""Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
@@ -415,7 +447,36 @@ def _find_statements(self):
# AST analysis
#
-class BlockBase:
+class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
+ """The information needed to start an arc.
+
+ `lineno` is the line number the arc starts from.
+
+ `cause` is an English text fragment used as the `startmsg` for
+ AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
+ arc wasn't executed, so should fit well into a sentence of the form,
+ "Line 17 didn't run because {cause}." The fragment can include "{lineno}"
+ to have `lineno` interpolated into it.
+
+ """
+ def __new__(cls, lineno: TLineNo, cause: Optional[str] = None) -> ArcStart:
+ return super().__new__(cls, lineno, cause)
+
+
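
# A small sketch of how an ArcStart's `cause` fragment is meant to be used,
# with the ArcStart class defined just above. The fragment text is made up,
# but follows the "{lineno}" interpolation described in the docstring.
start = ArcStart(17, cause="the condition on line {lineno} was never true")
print(start.cause.format(lineno=start.lineno))
# -> the condition on line 17 was never true
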
+class TAddArcFn(Protocol):
+ """The type for AstArcAnalyzer.add_arc()."""
+ def __call__(
+ self,
+ start: TLineNo,
+ end: TLineNo,
+ smsg: Optional[str] = None,
+ emsg: Optional[str] = None,
+ ) -> None:
+ ...
+
+TArcFragments = Dict[TArc, List[Tuple[Optional[str], Optional[str]]]]
+
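
# A sketch of a callable that satisfies TAddArcFn structurally, e.g. a
# recording stub for tests. This assumes TLineNo is an alias for int, as in
# coverage.types; the stub itself is hypothetical.
from typing import List, Optional, Tuple

recorded: List[Tuple[int, int, Optional[str], Optional[str]]] = []

def record_arc(start: int, end: int, smsg: Optional[str] = None, emsg: Optional[str] = None) -> None:
    recorded.append((start, end, smsg, emsg))

record_arc(17, 23, smsg="the condition on line {lineno} was never true")
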
+class Block:
"""
Blocks need to handle various exiting statements in their own ways.
@@ -425,56 +486,54 @@ class BlockBase:
stack.
"""
# pylint: disable=unused-argument
- def process_break_exits(self, exits, add_arc):
+ def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process break exits."""
# Because break can only appear in loops, and most subclasses
# implement process_break_exits, this function is never reached.
raise AssertionError
- def process_continue_exits(self, exits, add_arc):
+ def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process continue exits."""
# Because continue can only appear in loops, and most subclasses
# implement process_continue_exits, this function is never reached.
raise AssertionError
- def process_raise_exits(self, exits, add_arc):
+ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process raise exits."""
return False
- def process_return_exits(self, exits, add_arc):
+ def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process return exits."""
return False
-class LoopBlock(BlockBase):
+class LoopBlock(Block):
"""A block on the block stack representing a `for` or `while` loop."""
- @contract(start=int)
- def __init__(self, start):
+ def __init__(self, start: TLineNo) -> None:
# The line number where the loop starts.
self.start = start
# A set of ArcStarts, the arcs from break statements exiting this loop.
- self.break_exits = set()
+ self.break_exits: Set[ArcStart] = set()
- def process_break_exits(self, exits, add_arc):
+ def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
self.break_exits.update(exits)
return True
- def process_continue_exits(self, exits, add_arc):
+ def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(xit.lineno, self.start, xit.cause)
return True
-class FunctionBlock(BlockBase):
+class FunctionBlock(Block):
"""A block on the block stack representing a function definition."""
- @contract(start=int, name=str)
- def __init__(self, start, name):
+ def __init__(self, start: TLineNo, name: str) -> None:
# The line number where the function starts.
self.start = start
# The name of the function.
self.name = name
- def process_raise_exits(self, exits, add_arc):
+ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
@@ -482,7 +541,7 @@ def process_raise_exits(self, exits, add_arc):
)
return True
- def process_return_exits(self, exits, add_arc):
+ def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
@@ -491,10 +550,9 @@ def process_return_exits(self, exits, add_arc):
return True
-class TryBlock(BlockBase):
+class TryBlock(Block):
"""A block on the block stack representing a `try` block."""
- @contract(handler_start='int|None', final_start='int|None')
- def __init__(self, handler_start, final_start):
+ def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLineNo]) -> None:
# The line number of the first "except" handler, if any.
self.handler_start = handler_start
# The line number of the "finally:" clause, if any.
@@ -502,24 +560,24 @@ def __init__(self, handler_start, final_start):
# The ArcStarts for breaks/continues/returns/raises inside the "try:"
# that need to route through the "finally:" clause.
- self.break_from = set()
- self.continue_from = set()
- self.raise_from = set()
- self.return_from = set()
+ self.break_from: Set[ArcStart] = set()
+ self.continue_from: Set[ArcStart] = set()
+ self.raise_from: Set[ArcStart] = set()
+ self.return_from: Set[ArcStart] = set()
- def process_break_exits(self, exits, add_arc):
+ def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.final_start is not None:
self.break_from.update(exits)
return True
return False
- def process_continue_exits(self, exits, add_arc):
+ def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.final_start is not None:
self.continue_from.update(exits)
return True
return False
- def process_raise_exits(self, exits, add_arc):
+ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.handler_start is not None:
for xit in exits:
add_arc(xit.lineno, self.handler_start, xit.cause)
@@ -528,17 +586,16 @@ def process_raise_exits(self, exits, add_arc):
self.raise_from.update(exits)
return True
- def process_return_exits(self, exits, add_arc):
+ def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.final_start is not None:
self.return_from.update(exits)
return True
return False
-class WithBlock(BlockBase):
+class WithBlock(Block):
"""A block on the block stack representing a `with` block."""
- @contract(start=int)
- def __init__(self, start):
+ def __init__(self, start: TLineNo) -> None:
# We only ever use this block if it is needed, so that we don't have to
# check this setting in all the methods.
assert env.PYBEHAVIOR.exit_through_with
@@ -548,11 +605,16 @@ def __init__(self, start):
# The ArcStarts for breaks/continues/returns/raises inside the "with:"
# that need to go through the with-statement while exiting.
- self.break_from = set()
- self.continue_from = set()
- self.return_from = set()
-
- def _process_exits(self, exits, add_arc, from_set=None):
+ self.break_from: Set[ArcStart] = set()
+ self.continue_from: Set[ArcStart] = set()
+ self.return_from: Set[ArcStart] = set()
+
+ def _process_exits(
+ self,
+ exits: Set[ArcStart],
+ add_arc: TAddArcFn,
+ from_set: Optional[Set[ArcStart]] = None,
+ ) -> bool:
"""Helper to process the four kinds of exits."""
for xit in exits:
add_arc(xit.lineno, self.start, xit.cause)
@@ -560,48 +622,27 @@ def _process_exits(self, exits, add_arc, from_set=None):
from_set.update(exits)
return True
- def process_break_exits(self, exits, add_arc):
+ def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc, self.break_from)
- def process_continue_exits(self, exits, add_arc):
+ def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc, self.continue_from)
- def process_raise_exits(self, exits, add_arc):
+ def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc)
- def process_return_exits(self, exits, add_arc):
+ def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
return self._process_exits(exits, add_arc, self.return_from)
-class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
- """The information needed to start an arc.
-
- `lineno` is the line number the arc starts from.
-
- `cause` is an English text fragment used as the `startmsg` for
- AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
- arc wasn't executed, so should fit well into a sentence of the form,
- "Line 17 didn't run because {cause}." The fragment can include "{lineno}"
- to have `lineno` interpolated into it.
-
- """
- def __new__(cls, lineno, cause=None):
- return super().__new__(cls, lineno, cause)
-
-
-# Define contract words that PyContract doesn't have.
-# ArcStarts is for a list or set of ArcStart's.
-new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
-
-
-class NodeList:
+class NodeList(ast.AST):
"""A synthetic fictitious node, containing a sequence of nodes.
This is used when collapsing optimized if-statements, to represent the
unconditional execution of one of the clauses.
"""
- def __init__(self, body):
+ def __init__(self, body: Sequence[ast.AST]) -> None:
self.body = body
self.lineno = body[0].lineno
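
# A quick sketch of NodeList in use, with the class defined just above and a
# made-up source snippet: one clause of an if-statement is wrapped so it can
# be treated as a single node.
import ast

tree = ast.parse("if cond:\n    a = 1\n    b = 2\n")
if_node = tree.body[0]
clause = NodeList(if_node.body)
print(clause.lineno)  # -> 2, the first line of the wrapped clause
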
@@ -609,17 +650,25 @@ def __init__(self, body):
# TODO: the cause messages have too many commas.
# TODO: Shouldn't the cause messages join with "and" instead of "or"?
-def ast_parse(text):
- """How we create an AST parse."""
- return ast.parse(neuter_encoding_declaration(text))
+def _make_expression_code_method(noun: str) -> Callable[[AstArcAnalyzer, ast.AST], None]:
+ """A function to make methods for expression-based callable _code_object__ methods."""
+ def _code_object__expression_callable(self: AstArcAnalyzer, node: ast.AST) -> None:
+ start = self.line_for_node(node)
+ self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}")
+ self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}")
+ return _code_object__expression_callable
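
# A sketch of how such a factory is applied as class attributes. The attribute
# names and nouns below illustrate the pattern (the upstream class presumably
# wires up lambdas, generator expressions, etc. this way); they are not copied
# from this diff.
class _ExpressionHandlersSketch:
    _code_object__Lambda = _make_expression_code_method("lambda")
    _code_object__GeneratorExp = _make_expression_code_method("generator expression")
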
class AstArcAnalyzer:
"""Analyze source text with an AST to find executable code paths."""
- @contract(text='unicode', statements=set)
- def __init__(self, text, statements, multiline):
- self.root_node = ast_parse(text)
+ def __init__(
+ self,
+ text: str,
+ statements: Set[TLineNo],
+ multiline: Dict[TLineNo, TLineNo],
+ ) -> None:
+ self.root_node = ast.parse(text)
# TODO: I think this is happening in too many places.
self.statements = {multiline.get(l, l) for l in statements}
self.multiline = multiline
@@ -634,20 +683,20 @@ def __init__(self, text, statements, multiline):
print(f"Multiline map: {self.multiline}")
ast_dump(self.root_node)
- self.arcs = set()
+ self.arcs: Set[TArc] = set()
# A map from arc pairs to a list of pairs of sentence fragments:
# { (start, end): [(startmsg, endmsg), ...], }
#
# For an arc from line 17, they should be usable like:
# "Line 17 {endmsg}, because {startmsg}"
- self.missing_arc_fragments = collections.defaultdict(list)
- self.block_stack = []
+ self.missing_arc_fragments: TArcFragments = collections.defaultdict(list)
+ self.block_stack: List[Block] = []
# $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code.
self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
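
# A standalone sketch of the missing_arc_fragments shape described in the
# comments above; the arc and the fragment text are made up.
fragments = {
    (17, 23): [("the condition on line 17 was never true", "didn't jump to line 23")],
}
startmsg, endmsg = fragments[(17, 23)][0]
print(f"Line 17 {endmsg}, because {startmsg}")
# -> Line 17 didn't jump to line 23, because the condition on line 17 was never true
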
- def analyze(self):
+ def analyze(self) -> None:
"""Examine the AST tree from `root_node` to determine possible arcs.
This sets the `arcs` attribute to be a set of (from, to) line number
@@ -660,8 +709,13 @@ def analyze(self):
if code_object_handler is not None:
code_object_handler(node)
- @contract(start=int, end=int)
- def add_arc(self, start, end, smsg=None, emsg=None):
+ def add_arc(
+ self,
+ start: TLineNo,
+ end: TLineNo,
+ smsg: Optional[str] = None,
+ emsg: Optional[str] = None,
+ ) -> None:
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
print(f"\nAdding possible arc: ({start}, {end}): {smsg!r}, {emsg!r}")
@@ -671,25 +725,27 @@ def add_arc(self, start, end, smsg=None, emsg=None):
if smsg is not None or emsg is not None:
self.missing_arc_fragments[(start, end)].append((smsg, emsg))
- def nearest_blocks(self):
+ def nearest_blocks(self) -> Iterable[Block]:
"""Yield the blocks in nearest-to-farthest order."""
return reversed(self.block_stack)
- @contract(returns=int)
- def line_for_node(self, node):
+ def line_for_node(self, node: ast.AST) -> TLineNo:
"""What is the right line number to use for this node?
This dispatches to _line__Node functions where needed.
"""
node_name = node.__class__.__name__
- handler = getattr(self, "_line__" + node_name, None)
+ handler = cast(
+ Optional[Callable[[ast.AST], TLineNo]],
+ getattr(self, "_line__" + node_name, None)
+ )
if handler is not None:
return handler(node)
else:
return node.lineno
- def _line_decorated(self, node):
+ def _line_decorated(self, node: ast.FunctionDef) -> TLineNo:
"""Compute first line number for things that can be decorated (classes and functions)."""
lineno = node.lineno
if env.PYBEHAVIOR.trace_decorated_def or env.PYBEHAVIOR.def_ast_no_decorator:
@@ -697,17 +753,17 @@ def _line_decorated(self, node):
lineno = node.decorator_list[0].lineno
return lineno
- def _line__Assign(self, node):
+ def _line__Assign(self, node: ast.Assign) -> TLineNo:
return self.line_for_node(node.value)
_line__ClassDef = _line_decorated
- def _line__Dict(self, node):
+ def _line__Dict(self, node: ast.Dict) -> TLineNo:
if node.keys:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
- # Unpacked dict literals `{**{'a':1}}` have None as the key,
+ # Unpacked dict literals `{**{"a":1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
@@ -716,13 +772,13 @@ def _line__Dict(self, node):
_line__FunctionDef = _line_decorated
_line__AsyncFunctionDef = _line_decorated
- def _line__List(self, node):
+ def _line__List(self, node: ast.List) -> TLineNo:
if node.elts:
return self.line_for_node(node.elts[0])
else:
return node.lineno
- def _line__Module(self, node):
+ def _line__Module(self, node: ast.Module) -> TLineNo:
if env.PYBEHAVIOR.module_firstline_1:
return 1
elif node.body:
@@ -737,8 +793,7 @@ def _line__Module(self, node):
"Import", "ImportFrom", "Nonlocal", "Pass",
}
- @contract(returns='ArcStarts')
- def add_arcs(self, node):
+ def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
"""Add the arcs for `node`.
Return a set of ArcStarts, exits from this node to the next. Because a
@@ -755,7 +810,10 @@ def add_arcs(self, node):
"""
node_name = node.__class__.__name__
- handler = getattr(self, "_handle__" + node_name, None)
+ handler = cast(
+ Optional[Callable[[ast.AST], Set[ArcStart]]],
+ getattr(self, "_handle__" + node_name, None)
+ )
if handler is not None:
return handler(node)
else:
@@ -763,14 +821,17 @@ def add_arcs(self, node):
# statement), or it's something we overlooked.
if env.TESTING:
if node_name not in self.OK_TO_DEFAULT:
- raise Exception(f"*** Unhandled: {node}") # pragma: only failure
+ raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure
# Default for simple statements: one exit from this node.
return {ArcStart(self.line_for_node(node))}
- @one_of("from_start, prev_starts")
- @contract(returns='ArcStarts')
- def add_body_arcs(self, body, from_start=None, prev_starts=None):
+ def add_body_arcs(
+ self,
+ body: Sequence[ast.AST],
+ from_start: Optional[ArcStart] = None,
+ prev_starts: Optional[Set[ArcStart]] = None
+ ) -> Set[ArcStart]:
"""Add arcs for the body of a compound statement.
`body` is the body node. `from_start` is a single `ArcStart` that can
@@ -782,21 +843,23 @@ def add_body_arcs(self, body, from_start=None, prev_starts=None):
"""
if prev_starts is None:
+ assert from_start is not None
prev_starts = {from_start}
for body_node in body:
lineno = self.line_for_node(body_node)
first_line = self.multiline.get(lineno, lineno)
if first_line not in self.statements:
- body_node = self.find_non_missing_node(body_node)
- if body_node is None:
+ maybe_body_node = self.find_non_missing_node(body_node)
+ if maybe_body_node is None:
continue
+ body_node = maybe_body_node
lineno = self.line_for_node(body_node)
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
prev_starts = self.add_arcs(body_node)
return prev_starts
- def find_non_missing_node(self, node):
+ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
"""Search `node` looking for a child that has not been optimized away.
This might return the node you started with, or it will work recursively
@@ -813,12 +876,15 @@ def find_non_missing_node(self, node):
if first_line in self.statements:
return node
- missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
- if missing_fn:
- node = missing_fn(node)
+ missing_fn = cast(
+ Optional[Callable[[ast.AST], Optional[ast.AST]]],
+ getattr(self, "_missing__" + node.__class__.__name__, None)
+ )
+ if missing_fn is not None:
+ ret_node = missing_fn(node)
else:
- node = None
- return node
+ ret_node = None
+ return ret_node
# Missing nodes: _missing__*
#
@@ -827,7 +893,7 @@ def find_non_missing_node(self, node):
# find_non_missing_node) to find a node to use instead of the missing
# node. They can return None if the node should truly be gone.
- def _missing__If(self, node):
+ def _missing__If(self, node: ast.If) -> Optional[ast.AST]:
# If the if-node is missing, then one of its children might still be
# here, but not both. So return the first of the two that isn't missing.
# Use a NodeList to hold the clauses as a single node.
@@ -838,14 +904,14 @@ def _missing__If(self, node):
return self.find_non_missing_node(NodeList(node.orelse))
return None
- def _missing__NodeList(self, node):
+ def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]:
# A NodeList might be a mixture of missing and present nodes. Find the
# ones that are present.
non_missing_children = []
for child in node.body:
- child = self.find_non_missing_node(child)
- if child is not None:
- non_missing_children.append(child)
+ maybe_child = self.find_non_missing_node(child)
+ if maybe_child is not None:
+ non_missing_children.append(maybe_child)
# Return the simplest representation of the present children.
if not non_missing_children:
@@ -854,7 +920,7 @@ def _missing__NodeList(self, node):
return non_missing_children[0]
return NodeList(non_missing_children)
- def _missing__While(self, node):
+ def _missing__While(self, node: ast.While) -> Optional[ast.AST]:
body_nodes = self.find_non_missing_node(NodeList(node.body))
if not body_nodes:
return None
@@ -864,16 +930,17 @@ def _missing__While(self, node):
new_while.test = ast.Name()
new_while.test.lineno = body_nodes.lineno
new_while.test.id = "True"
+ assert hasattr(body_nodes, "body")
new_while.body = body_nodes.body
- new_while.orelse = None
+ new_while.orelse = []
return new_while
- def is_constant_expr(self, node):
+ def is_constant_expr(self, node: ast.AST) -> Optional[str]:
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["Constant", "NameConstant", "Num"]:
return "Num"
- elif node_name == "Name":
+ elif isinstance(node, ast.Name):
if node.id in ["True", "False", "None", "__debug__"]:
return "Name"
return None
@@ -885,7 +952,6 @@ def is_constant_expr(self, node):
# listcomps hidden in lists: x = [[i for i in range(10)]]
# nested function definitions
-
# Exit processing: process_*_exits
#
# These functions process the four kinds of jump exits: break, continue,
@@ -894,29 +960,25 @@ def is_constant_expr(self, node):
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
- @contract(exits='ArcStarts')
- def process_break_exits(self, exits):
+ def process_break_exits(self, exits: Set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_break_exits(exits, self.add_arc):
break
- @contract(exits='ArcStarts')
- def process_continue_exits(self, exits):
+ def process_continue_exits(self, exits: Set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_continue_exits(exits, self.add_arc):
break
- @contract(exits='ArcStarts')
- def process_raise_exits(self, exits):
+ def process_raise_exits(self, exits: Set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if block.process_raise_exits(exits, self.add_arc):
break
- @contract(exits='ArcStarts')
- def process_return_exits(self, exits):
+ def process_return_exits(self, exits: Set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_return_exits(exits, self.add_arc):
@@ -933,17 +995,16 @@ def process_return_exits(self, exits):
# Every node type that represents a statement should have a handler, or it
# should be listed in OK_TO_DEFAULT.
- @contract(returns='ArcStarts')
- def _handle__Break(self, node):
+ def _handle__Break(self, node: ast.Break) -> Set[ArcStart]:
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
- self.process_break_exits([break_start])
+ self.process_break_exits({break_start})
return set()
- @contract(returns='ArcStarts')
- def _handle_decorated(self, node):
+ def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]:
"""Add arcs for things that can be decorated (classes and functions)."""
- main_line = last = node.lineno
+ main_line: TLineNo = node.lineno
+ last: Optional[TLineNo] = node.lineno
decs = node.decorator_list
if decs:
if env.PYBEHAVIOR.trace_decorated_def or env.PYBEHAVIOR.def_ast_no_decorator:
@@ -953,6 +1014,7 @@ def _handle_decorated(self, node):
if last is not None and dec_start != last:
self.add_arc(last, dec_start)
last = dec_start
+ assert last is not None
if env.PYBEHAVIOR.trace_decorated_def:
self.add_arc(last, main_line)
last = main_line
@@ -973,19 +1035,18 @@ def _handle_decorated(self, node):
self.add_arc(last, lineno)
last = lineno
# The body is handled in collect_arcs.
+ assert last is not None
return {ArcStart(last)}
_handle__ClassDef = _handle_decorated
- @contract(returns='ArcStarts')
- def _handle__Continue(self, node):
+ def _handle__Continue(self, node: ast.Continue) -> Set[ArcStart]:
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
- self.process_continue_exits([continue_start])
+ self.process_continue_exits({continue_start})
return set()
- @contract(returns='ArcStarts')
- def _handle__For(self, node):
+ def _handle__For(self, node: ast.For) -> Set[ArcStart]:
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
@@ -994,6 +1055,7 @@ def _handle__For(self, node):
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
my_block = self.block_stack.pop()
+ assert isinstance(my_block, LoopBlock)
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
@@ -1009,8 +1071,7 @@ def _handle__For(self, node):
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
- @contract(returns='ArcStarts')
- def _handle__If(self, node):
+ def _handle__If(self, node: ast.If) -> Set[ArcStart]:
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
@@ -1018,48 +1079,50 @@ def _handle__If(self, node):
exits |= self.add_body_arcs(node.orelse, from_start=from_start)
return exits
- @contract(returns='ArcStarts')
- def _handle__Match(self, node):
- start = self.line_for_node(node)
- last_start = start
- exits = set()
- had_wildcard = False
- for case in node.cases:
- case_start = self.line_for_node(case.pattern)
- if isinstance(case.pattern, ast.MatchAs):
- had_wildcard = True
- self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
- from_start = ArcStart(case_start, cause="the pattern on line {lineno} never matched")
- exits |= self.add_body_arcs(case.body, from_start=from_start)
- last_start = case_start
- if not had_wildcard:
- exits.add(from_start)
- return exits
+ if sys.version_info >= (3, 10):
+ def _handle__Match(self, node: ast.Match) -> Set[ArcStart]:
+ start = self.line_for_node(node)
+ last_start = start
+ exits = set()
+ had_wildcard = False
+ for case in node.cases:
+ case_start = self.line_for_node(case.pattern)
+ pattern = case.pattern
+ while isinstance(pattern, ast.MatchOr):
+ pattern = pattern.patterns[-1]
+ if isinstance(pattern, ast.MatchAs):
+ had_wildcard = True
+ self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
+ from_start = ArcStart(
+ case_start,
+ cause="the pattern on line {lineno} never matched",
+ )
+ exits |= self.add_body_arcs(case.body, from_start=from_start)
+ last_start = case_start
+ if not had_wildcard:
+ exits.add(from_start)
+ return exits
- @contract(returns='ArcStarts')
- def _handle__NodeList(self, node):
+ def _handle__NodeList(self, node: NodeList) -> Set[ArcStart]:
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
- @contract(returns='ArcStarts')
- def _handle__Raise(self, node):
+ def _handle__Raise(self, node: ast.Raise) -> Set[ArcStart]:
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
- self.process_raise_exits([raise_start])
+ self.process_raise_exits({raise_start})
# `raise` statement jumps away, no exits from here.
return set()
- @contract(returns='ArcStarts')
- def _handle__Return(self, node):
+ def _handle__Return(self, node: ast.Return) -> Set[ArcStart]:
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
- self.process_return_exits([return_start])
+ self.process_return_exits({return_start})
# `return` statement jumps away, no exits from here.
return set()
- @contract(returns='ArcStarts')
- def _handle__Try(self, node):
+ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
@@ -1092,10 +1155,10 @@ def _handle__Try(self, node):
else:
self.block_stack.pop()
- handler_exits = set()
+ handler_exits: Set[ArcStart] = set()
if node.handlers:
- last_handler_start = None
+ last_handler_start: Optional[TLineNo] = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
if last_handler_start is not None:
@@ -1170,8 +1233,7 @@ def _handle__Try(self, node):
return exits
- @contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
- def _combine_finally_starts(self, starts, exits):
+ def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -> Set[ArcStart]:
"""Helper for building the cause of `finally` branches.
"finally" clauses might not execute their exits, and the causes could
@@ -1186,8 +1248,7 @@ def _combine_finally_starts(self, starts, exits):
exits = {ArcStart(xit.lineno, cause) for xit in exits}
return exits
- @contract(returns='ArcStarts')
- def _handle__While(self, node):
+ def _handle__While(self, node: ast.While) -> Set[ArcStart]:
start = to_top = self.line_for_node(node.test)
constant_test = self.is_constant_expr(node.test)
top_is_body0 = False
@@ -1204,6 +1265,7 @@ def _handle__While(self, node):
self.add_arc(xit.lineno, to_top, xit.cause)
exits = set()
my_block = self.block_stack.pop()
+ assert isinstance(my_block, LoopBlock)
exits.update(my_block.break_exits)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
if node.orelse:
@@ -1215,14 +1277,14 @@ def _handle__While(self, node):
exits.add(from_start)
return exits
- @contract(returns='ArcStarts')
- def _handle__With(self, node):
+ def _handle__With(self, node: ast.With) -> Set[ArcStart]:
start = self.line_for_node(node)
if env.PYBEHAVIOR.exit_through_with:
self.block_stack.append(WithBlock(start=start))
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
if env.PYBEHAVIOR.exit_through_with:
with_block = self.block_stack.pop()
+ assert isinstance(with_block, WithBlock)
with_exit = {ArcStart(start)}
if exits:
for xit in exits:
@@ -1249,7 +1311,7 @@ def _handle__With(self, node):
# These methods are used by analyze() as the start of the analysis.
# There is one for each construct with a code object.
- def _code_object__Module(self, node):
+ def _code_object__Module(self, node: ast.Module) -> None:
start = self.line_for_node(node)
if node.body:
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
@@ -1260,7 +1322,7 @@ def _code_object__Module(self, node):
self.add_arc(-start, start)
self.add_arc(start, -start)
- def _code_object__FunctionDef(self, node):
+ def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None:
start = self.line_for_node(node)
self.block_stack.append(FunctionBlock(start=start, name=node.name))
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
@@ -1269,7 +1331,7 @@ def _code_object__FunctionDef(self, node):
_code_object__AsyncFunctionDef = _code_object__FunctionDef
- def _code_object__ClassDef(self, node):
+ def _code_object__ClassDef(self, node: ast.ClassDef) -> None:
start = self.line_for_node(node)
self.add_arc(-start, start)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
@@ -1279,34 +1341,30 @@ def _code_object__ClassDef(self, node):
f"didn't exit the body of class {node.name!r}",
)
- def _make_expression_code_method(noun): # pylint: disable=no-self-argument
- """A function to make methods for expression-based callable _code_object__ methods."""
- def _code_object__expression_callable(self, node):
- start = self.line_for_node(node)
- self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}")
- self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}")
- return _code_object__expression_callable
-
- # pylint: disable=too-many-function-args
_code_object__Lambda = _make_expression_code_method("lambda")
_code_object__GeneratorExp = _make_expression_code_method("generator expression")
- _code_object__DictComp = _make_expression_code_method("dictionary comprehension")
- _code_object__SetComp = _make_expression_code_method("set comprehension")
- _code_object__ListComp = _make_expression_code_method("list comprehension")
+ if env.PYBEHAVIOR.comprehensions_are_functions:
+ _code_object__DictComp = _make_expression_code_method("dictionary comprehension")
+ _code_object__SetComp = _make_expression_code_method("set comprehension")
+ _code_object__ListComp = _make_expression_code_method("list comprehension")
# Code only used when dumping the AST for debugging.
SKIP_DUMP_FIELDS = ["ctx"]
-def _is_simple_value(value):
+def _is_simple_value(value: Any) -> bool:
"""Is `value` simple enough to be displayed on a single line?"""
return (
- value in [None, [], (), {}, set()] or
+ value in [None, [], (), {}, set(), frozenset(), Ellipsis] or
isinstance(value, (bytes, int, float, str))
)
-def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin
+def ast_dump(
+ node: ast.AST,
+ depth: int = 0,
+ print: Callable[[str], None] = print, # pylint: disable=redefined-builtin
+) -> None:
"""Dump the AST for `node`.
This recursively walks the AST, printing a readable version.
@@ -1317,6 +1375,7 @@ def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin
if lineno is not None:
linemark = f" @ {node.lineno},{node.col_offset}"
if hasattr(node, "end_lineno"):
+ assert hasattr(node, "end_col_offset")
linemark += ":"
if node.end_lineno != node.lineno:
linemark += f"{node.end_lineno},"
@@ -1338,7 +1397,7 @@ def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin
else:
print(head)
if 0:
- print("{}# mro: {}".format(
+ print("{}# mro: {}".format( # type: ignore[unreachable]
indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
))
next_indent = indent + " "
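The version-gated `_handle__Match` above is the pattern this patch uses throughout to keep mypy happy on interpreters older than 3.10: handlers that mention `ast.Match` or `ast.MatchOr` are only defined when those node types exist. A minimal sketch of the same idea, with a visitor name invented for illustration:

    import ast
    import sys

    class MatchLineVisitor(ast.NodeVisitor):
        # Defining the handler conditionally means ast.Match is only referenced
        # on Pythons whose stdlib (and typeshed stubs) actually provide it.
        if sys.version_info >= (3, 10):
            def visit_Match(self, node: ast.Match) -> None:
                for case in node.cases:
                    print(case.pattern.lineno)
                self.generic_visit(node)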
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 7184f1604..d5659268d 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -3,17 +3,26 @@
"""Better tokenizing for coverage.py."""
+from __future__ import annotations
+
import ast
+import io
import keyword
import re
+import sys
import token
import tokenize
+from typing import Iterable, List, Optional, Set, Tuple
+
from coverage import env
-from coverage.misc import contract
+from coverage.types import TLineNo, TSourceTokenLines
+
+TokenInfos = Iterable[tokenize.TokenInfo]
-def phys_tokens(toks):
+
+def _phys_tokens(toks: TokenInfos) -> TokenInfos:
"""Return all physical tokens, even line continuations.
tokenize.generate_tokens() doesn't return a token for the backslash that
@@ -23,9 +32,9 @@ def phys_tokens(toks):
Returns the same values as generate_tokens()
"""
- last_line = None
+ last_line: Optional[str] = None
last_lineno = -1
- last_ttext = None
+ last_ttext: str = ""
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
if last_lineno != elineno:
if last_line and last_line.endswith("\\\n"):
@@ -48,7 +57,7 @@ def phys_tokens(toks):
if last_ttext.endswith("\\"):
inject_backslash = False
elif ttype == token.STRING:
- if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
+ if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
# It's a multi-line string and the first line ends with
# a backslash, so we don't need to inject another.
inject_backslash = False
@@ -56,7 +65,7 @@ def phys_tokens(toks):
# Figure out what column the backslash is in.
ccol = len(last_line.split("\n")[-2]) - 1
# Yield the token, with a fake token type.
- yield (
+ yield tokenize.TokenInfo(
99999, "\\\n",
(slineno, ccol), (slineno, ccol+2),
last_line
@@ -64,27 +73,27 @@ def phys_tokens(toks):
last_line = ltext
if ttype not in (tokenize.NEWLINE, tokenize.NL):
last_ttext = ttext
- yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
+ yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
last_lineno = elineno
class MatchCaseFinder(ast.NodeVisitor):
"""Helper for finding match/case lines."""
- def __init__(self, source):
+ def __init__(self, source: str) -> None:
# This will be the set of line numbers that start match or case statements.
- self.match_case_lines = set()
+ self.match_case_lines: Set[TLineNo] = set()
self.visit(ast.parse(source))
- def visit_Match(self, node):
- """Invoked by ast.NodeVisitor.visit"""
- self.match_case_lines.add(node.lineno)
- for case in node.cases:
- self.match_case_lines.add(case.pattern.lineno)
- self.generic_visit(node)
+ if sys.version_info >= (3, 10):
+ def visit_Match(self, node: ast.Match) -> None:
+ """Invoked by ast.NodeVisitor.visit"""
+ self.match_case_lines.add(node.lineno)
+ for case in node.cases:
+ self.match_case_lines.add(case.pattern.lineno)
+ self.generic_visit(node)
-@contract(source='unicode')
-def source_token_lines(source):
+def source_token_lines(source: str) -> TSourceTokenLines:
"""Generate a series of lines, one for each line in `source`.
Each line is a list of pairs, each pair is a token::
@@ -95,30 +104,30 @@ def source_token_lines(source):
If you concatenate all the token texts, and then join them with newlines,
you should have your original `source` back, with two differences:
- trailing whitespace is not preserved, and a final line with no newline
+ trailing white space is not preserved, and a final line with no newline
is indistinguishable from a final line with a newline.
"""
ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
- line = []
+ line: List[Tuple[str, str]] = []
col = 0
- source = source.expandtabs(8).replace('\r\n', '\n')
+ source = source.expandtabs(8).replace("\r\n", "\n")
tokgen = generate_tokens(source)
if env.PYBEHAVIOR.soft_keywords:
match_case_lines = MatchCaseFinder(source).match_case_lines
- for ttype, ttext, (sline, scol), (_, ecol), _ in phys_tokens(tokgen):
+ for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
mark_start = True
- for part in re.split('(\n)', ttext):
- if part == '\n':
+ for part in re.split("(\n)", ttext):
+ if part == "\n":
yield line
line = []
col = 0
mark_end = False
- elif part == '':
+ elif part == "":
mark_end = False
elif ttype in ws_tokens:
mark_end = False
@@ -126,22 +135,25 @@ def source_token_lines(source):
if mark_start and scol > col:
line.append(("ws", " " * (scol - col)))
mark_start = False
- tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
+ tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
if ttype == token.NAME:
if keyword.iskeyword(ttext):
# Hard keywords are always keywords.
tok_class = "key"
- elif env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
- # Soft keywords appear at the start of the line, on lines that start
- # match or case statements.
- if len(line) == 0:
- is_start_of_line = True
- elif (len(line) == 1) and line[0][0] == "ws":
- is_start_of_line = True
- else:
- is_start_of_line = False
- if is_start_of_line and sline in match_case_lines:
- tok_class = "key"
+ elif sys.version_info >= (3, 10): # PYVERSIONS
+ # Need the version_info check to keep mypy from borking
+ # on issoftkeyword here.
+ if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
+ # Soft keywords appear at the start of the line,
+ # on lines that start match or case statements.
+ if len(line) == 0:
+ is_start_of_line = True
+ elif (len(line) == 1) and line[0][0] == "ws":
+ is_start_of_line = True
+ else:
+ is_start_of_line = False
+ if is_start_of_line and sline in match_case_lines:
+ tok_class = "key"
line.append((tok_class, part))
mark_end = True
scol = 0
@@ -163,27 +175,27 @@ class CachedTokenizer:
actually tokenize twice.
"""
- def __init__(self):
- self.last_text = None
- self.last_tokens = None
+ def __init__(self) -> None:
+ self.last_text: Optional[str] = None
+ self.last_tokens: List[tokenize.TokenInfo] = []
- @contract(text='unicode')
- def generate_tokens(self, text):
+ def generate_tokens(self, text: str) -> TokenInfos:
"""A stand-in for `tokenize.generate_tokens`."""
if text != self.last_text:
self.last_text = text
- readline = iter(text.splitlines(True)).__next__
- self.last_tokens = list(tokenize.generate_tokens(readline))
+ readline = io.StringIO(text).readline
+ try:
+ self.last_tokens = list(tokenize.generate_tokens(readline))
+ except:
+ self.last_text = None
+ raise
return self.last_tokens
# Create our generate_tokens cache as a callable replacement function.
generate_tokens = CachedTokenizer().generate_tokens
-COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)
-
-@contract(source='bytes')
-def source_encoding(source):
+def source_encoding(source: bytes) -> str:
"""Determine the encoding for `source`, according to PEP 263.
`source` is a byte string: the text of the program.
@@ -193,31 +205,3 @@ def source_encoding(source):
"""
readline = iter(source.splitlines(True)).__next__
return tokenize.detect_encoding(readline)[0]
-
-
-@contract(source='unicode')
-def compile_unicode(source, filename, mode):
- """Just like the `compile` builtin, but works on any Unicode string.
-
- Python 2's compile() builtin has a stupid restriction: if the source string
- is Unicode, then it may not have a encoding declaration in it. Why not?
- Who knows! It also decodes to utf-8, and then tries to interpret those
- utf-8 bytes according to the encoding declaration. Why? Who knows!
-
- This function neuters the coding declaration, and compiles it.
-
- """
- source = neuter_encoding_declaration(source)
- code = compile(source, filename, mode)
- return code
-
-
-@contract(source='unicode', returns='unicode')
-def neuter_encoding_declaration(source):
- """Return `source`, with any encoding declaration neutered."""
- if COOKIE_RE.search(source):
- source_lines = source.splitlines(True)
- for lineno in range(min(2, len(source_lines))):
- source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
- source = "".join(source_lines)
- return source
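For reference, `source_token_lines` (now annotated as returning `TSourceTokenLines`) yields one list of (token class, token text) pairs per source line, as its docstring describes. A small usage sketch; the output in the comment is indicative, not captured from a real run:

    from coverage.phystokens import source_token_lines

    src = 'def hello():\n    return "hi"\n'
    for line in source_token_lines(src):
        # e.g. [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ("op", ")"), ("op", ":")]
        print(line)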
diff --git a/coverage/plugin.py b/coverage/plugin.py
index bf30b1b73..2c1ffada4 100644
--- a/coverage/plugin.py
+++ b/coverage/plugin.py
@@ -112,16 +112,25 @@ def coverage_init(reg, options):
"""
+from __future__ import annotations
+
import functools
+from types import FrameType
+from typing import Any, Dict, Iterable, Optional, Set, Tuple, Union
+
from coverage import files
-from coverage.misc import contract, _needs_to_implement
+from coverage.misc import _needs_to_implement
+from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines
class CoveragePlugin:
"""Base class for coverage.py plug-ins."""
- def file_tracer(self, filename): # pylint: disable=unused-argument
+ _coverage_plugin_name: str
+ _coverage_enabled: bool
+
+ def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=unused-argument
"""Get a :class:`FileTracer` object for a file.
Plug-in type: file tracer.
@@ -161,7 +170,10 @@ def file_tracer(self, filename): # pylint: disable=unused-argument
"""
return None
- def file_reporter(self, filename): # pylint: disable=unused-argument
+ def file_reporter( # type: ignore[return]
+ self,
+ filename: str, # pylint: disable=unused-argument
+ ) -> Union[FileReporter, str]: # str should be Literal["python"]
"""Get the :class:`FileReporter` class to use for a file.
Plug-in type: file tracer.
@@ -175,7 +187,10 @@ def file_reporter(self, filename): # pylint: disable=unused-argument
"""
_needs_to_implement(self, "file_reporter")
- def dynamic_context(self, frame): # pylint: disable=unused-argument
+ def dynamic_context(
+ self,
+ frame: FrameType, # pylint: disable=unused-argument
+ ) -> Optional[str]:
"""Get the dynamically computed context label for `frame`.
Plug-in type: dynamic context.
@@ -191,7 +206,10 @@ def dynamic_context(self, frame): # pylint: disable=unused-argument
"""
return None
- def find_executable_files(self, src_dir): # pylint: disable=unused-argument
+ def find_executable_files(
+ self,
+ src_dir: str, # pylint: disable=unused-argument
+ ) -> Iterable[str]:
"""Yield all of the executable files in `src_dir`, recursively.
Plug-in type: file tracer.
@@ -206,7 +224,7 @@ def find_executable_files(self, src_dir): # pylint: disable=unused-argumen
"""
return []
- def configure(self, config):
+ def configure(self, config: TConfigurable) -> None:
"""Modify the configuration of coverage.py.
Plug-in type: configurer.
@@ -220,7 +238,7 @@ def configure(self, config):
"""
pass
- def sys_info(self):
+ def sys_info(self) -> Iterable[Tuple[str, Any]]:
"""Get a list of information useful for debugging.
Plug-in type: any.
@@ -234,7 +252,12 @@ def sys_info(self):
return []
-class FileTracer:
+class CoveragePluginBase:
+ """Plugins produce specialized objects, which point back to the original plugin."""
+ _coverage_plugin: CoveragePlugin
+
+
+class FileTracer(CoveragePluginBase):
"""Support needed for files during the execution phase.
File tracer plug-ins implement subclasses of FileTracer to return from
@@ -251,7 +274,7 @@ class FileTracer:
"""
- def source_filename(self):
+ def source_filename(self) -> str: # type: ignore[return]
"""The source file name for this file.
This may be any file name you like. A key responsibility of a plug-in
@@ -266,7 +289,7 @@ def source_filename(self):
"""
_needs_to_implement(self, "source_filename")
- def has_dynamic_source_filename(self):
+ def has_dynamic_source_filename(self) -> bool:
"""Does this FileTracer have dynamic source file names?
FileTracers can provide dynamically determined file names by
@@ -284,7 +307,11 @@ def has_dynamic_source_filename(self):
"""
return False
- def dynamic_source_filename(self, filename, frame): # pylint: disable=unused-argument
+ def dynamic_source_filename(
+ self,
+ filename: str, # pylint: disable=unused-argument
+ frame: FrameType, # pylint: disable=unused-argument
+ ) -> Optional[str]:
"""Get a dynamically computed source file name.
Some plug-ins need to compute the source file name dynamically for each
@@ -299,7 +326,7 @@ def dynamic_source_filename(self, filename, frame): # pylint: disable=unused
"""
return None
- def line_number_range(self, frame):
+ def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
"""Get the range of source line numbers for a given a call frame.
The call frame is examined, and the source line number in the original
@@ -318,7 +345,7 @@ def line_number_range(self, frame):
@functools.total_ordering
-class FileReporter:
+class FileReporter(CoveragePluginBase):
"""Support needed for files during the analysis and reporting phases.
File tracer plug-ins implement a subclass of `FileReporter`, and return
@@ -331,7 +358,7 @@ class FileReporter:
"""
- def __init__(self, filename):
+ def __init__(self, filename: str) -> None:
"""Simple initialization of a `FileReporter`.
The `filename` argument is the path to the file being reported. This
@@ -341,10 +368,10 @@ def __init__(self, filename):
"""
self.filename = filename
- def __repr__(self):
+ def __repr__(self) -> str:
return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
- def relative_filename(self):
+ def relative_filename(self) -> str:
"""Get the relative file name for this file.
This file path will be displayed in reports. The default
@@ -355,8 +382,7 @@ def relative_filename(self):
"""
return files.relative_filename(self.filename)
- @contract(returns='unicode')
- def source(self):
+ def source(self) -> str:
"""Get the source for the file.
Returns a Unicode string.
@@ -366,10 +392,10 @@ def source(self):
as a text file, or if you need other encoding support.
"""
- with open(self.filename, "rb") as f:
- return f.read().decode("utf-8")
+ with open(self.filename, encoding="utf-8") as f:
+ return f.read()
- def lines(self):
+ def lines(self) -> Set[TLineNo]: # type: ignore[return]
"""Get the executable lines in this file.
Your plug-in must determine which lines in the file were possibly
@@ -380,7 +406,7 @@ def lines(self):
"""
_needs_to_implement(self, "lines")
- def excluded_lines(self):
+ def excluded_lines(self) -> Set[TLineNo]:
"""Get the excluded executable lines in this file.
Your plug-in can use any method it likes to allow the user to exclude
@@ -393,7 +419,7 @@ def excluded_lines(self):
"""
return set()
- def translate_lines(self, lines):
+ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
"""Translate recorded lines into reported lines.
Some file formats will want to report lines slightly differently than
@@ -413,7 +439,7 @@ def translate_lines(self, lines):
"""
return set(lines)
- def arcs(self):
+ def arcs(self) -> Set[TArc]:
"""Get the executable arcs in this file.
To support branch coverage, your plug-in needs to be able to indicate
@@ -427,7 +453,7 @@ def arcs(self):
"""
return set()
- def no_branch_lines(self):
+ def no_branch_lines(self) -> Set[TLineNo]:
"""Get the lines excused from branch coverage in this file.
Your plug-in can use any method it likes to allow the user to exclude
@@ -440,7 +466,7 @@ def no_branch_lines(self):
"""
return set()
- def translate_arcs(self, arcs):
+ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
"""Translate recorded arcs into reported arcs.
Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
@@ -451,9 +477,9 @@ def translate_arcs(self, arcs):
The default implementation returns `arcs` unchanged.
"""
- return arcs
+ return set(arcs)
- def exit_counts(self):
+ def exit_counts(self) -> Dict[TLineNo, int]:
"""Get a count of exits from that each line.
To determine which lines are branches, coverage.py looks for lines that
@@ -466,7 +492,12 @@ def exit_counts(self):
"""
return {}
- def missing_arc_description(self, start, end, executed_arcs=None): # pylint: disable=unused-argument
+ def missing_arc_description(
+ self,
+ start: TLineNo,
+ end: TLineNo,
+ executed_arcs: Optional[Iterable[TArc]] = None, # pylint: disable=unused-argument
+ ) -> str:
"""Provide an English sentence describing a missing arc.
The `start` and `end` arguments are the line numbers of the missing
@@ -481,41 +512,42 @@ def missing_arc_description(self, start, end, executed_arcs=None): # pylint:
"""
return f"Line {start} didn't jump to line {end}"
- def source_token_lines(self):
+ def source_token_lines(self) -> TSourceTokenLines:
"""Generate a series of tokenized lines, one for each line in `source`.
These tokens are used for syntax-colored reports.
Each line is a list of pairs, each pair is a token::
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+ [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ]
Each pair has a token class, and the token text. The token classes
are:
- * ``'com'``: a comment
- * ``'key'``: a keyword
- * ``'nam'``: a name, or identifier
- * ``'num'``: a number
- * ``'op'``: an operator
- * ``'str'``: a string literal
- * ``'ws'``: some white space
- * ``'txt'``: some other kind of text
+ * ``"com"``: a comment
+ * ``"key"``: a keyword
+ * ``"nam"``: a name, or identifier
+ * ``"num"``: a number
+ * ``"op"``: an operator
+ * ``"str"``: a string literal
+ * ``"ws"``: some white space
+ * ``"txt"``: some other kind of text
If you concatenate all the token texts, and then join them with
newlines, you should have your original source back.
The default implementation simply returns each line tagged as
- ``'txt'``.
+ ``"txt"``.
"""
for line in self.source().splitlines():
- yield [('txt', line)]
+ yield [("txt", line)]
- def __eq__(self, other):
+ def __eq__(self, other: Any) -> bool:
return isinstance(other, FileReporter) and self.filename == other.filename
- def __lt__(self, other):
+ def __lt__(self, other: Any) -> bool:
return isinstance(other, FileReporter) and self.filename < other.filename
- __hash__ = None # This object doesn't need to be hashed.
+ # This object doesn't need to be hashed.
+ __hash__ = None # type: ignore[assignment]
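With the plugin API annotated, a third-party plugin can be written against these types roughly as follows. This is a minimal sketch: the plugin class, the `.myext` check, and `MyTracer` are invented for illustration; only `CoveragePlugin`, `FileTracer`, and the `coverage_init` registration hook come from the documented plugin API:

    from typing import Optional

    from coverage.plugin import CoveragePlugin, FileTracer

    class MyTracer(FileTracer):
        def __init__(self, filename: str) -> None:
            self._filename = filename

        def source_filename(self) -> str:
            return self._filename

    class MyPlugin(CoveragePlugin):
        def file_tracer(self, filename: str) -> Optional[FileTracer]:
            # Return a FileTracer only for files this plugin handles;
            # returning None lets other plugins (or the default tracer) claim the file.
            if filename.endswith(".myext"):
                return MyTracer(filename)
            return None

    def coverage_init(reg, options):
        reg.add_file_tracer(MyPlugin())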
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index 0b8923918..c99fb5e30 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -3,13 +3,21 @@
"""Support for plugins."""
+from __future__ import annotations
+
import os
import os.path
import sys
+from types import FrameType
+from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union
+
from coverage.exceptions import PluginError
from coverage.misc import isolate_module
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
+from coverage.types import (
+ TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines,
+)
os = isolate_module(os)
@@ -17,18 +25,23 @@
class Plugins:
"""The currently loaded collection of coverage.py plugins."""
- def __init__(self):
- self.order = []
- self.names = {}
- self.file_tracers = []
- self.configurers = []
- self.context_switchers = []
+ def __init__(self) -> None:
+ self.order: List[CoveragePlugin] = []
+ self.names: Dict[str, CoveragePlugin] = {}
+ self.file_tracers: List[CoveragePlugin] = []
+ self.configurers: List[CoveragePlugin] = []
+ self.context_switchers: List[CoveragePlugin] = []
- self.current_module = None
- self.debug = None
+ self.current_module: Optional[str] = None
+ self.debug: Optional[TDebugCtl]
@classmethod
- def load_plugins(cls, modules, config, debug=None):
+ def load_plugins(
+ cls,
+ modules: Iterable[str],
+ config: TPluginConfig,
+ debug: Optional[TDebugCtl] = None,
+ ) -> Plugins:
"""Load plugins from `modules`.
Returns a Plugins object with the loaded and configured plugins.
@@ -54,7 +67,7 @@ def load_plugins(cls, modules, config, debug=None):
plugins.current_module = None
return plugins
- def add_file_tracer(self, plugin):
+ def add_file_tracer(self, plugin: CoveragePlugin) -> None:
"""Add a file tracer plugin.
`plugin` is an instance of a third-party plugin class. It must
@@ -63,7 +76,7 @@ def add_file_tracer(self, plugin):
"""
self._add_plugin(plugin, self.file_tracers)
- def add_configurer(self, plugin):
+ def add_configurer(self, plugin: CoveragePlugin) -> None:
"""Add a configuring plugin.
`plugin` is an instance of a third-party plugin class. It must
@@ -72,7 +85,7 @@ def add_configurer(self, plugin):
"""
self._add_plugin(plugin, self.configurers)
- def add_dynamic_context(self, plugin):
+ def add_dynamic_context(self, plugin: CoveragePlugin) -> None:
"""Add a dynamic context plugin.
`plugin` is an instance of a third-party plugin class. It must
@@ -81,7 +94,7 @@ def add_dynamic_context(self, plugin):
"""
self._add_plugin(plugin, self.context_switchers)
- def add_noop(self, plugin):
+ def add_noop(self, plugin: CoveragePlugin) -> None:
"""Add a plugin that does nothing.
This is only useful for testing the plugin support.
@@ -89,7 +102,11 @@ def add_noop(self, plugin):
"""
self._add_plugin(plugin, None)
- def _add_plugin(self, plugin, specialized):
+ def _add_plugin(
+ self,
+ plugin: CoveragePlugin,
+ specialized: Optional[List[CoveragePlugin]],
+ ) -> None:
"""Add a plugin object.
`plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
@@ -97,12 +114,11 @@ def _add_plugin(self, plugin, specialized):
"""
plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
- if self.debug and self.debug.should('plugin'):
+ if self.debug and self.debug.should("plugin"):
self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
plugin = DebugPluginWrapper(plugin, labelled)
- # pylint: disable=attribute-defined-outside-init
plugin._coverage_plugin_name = plugin_name
plugin._coverage_enabled = True
self.order.append(plugin)
@@ -110,13 +126,13 @@ def _add_plugin(self, plugin, specialized):
if specialized is not None:
specialized.append(plugin)
- def __bool__(self):
+ def __bool__(self) -> bool:
return bool(self.order)
- def __iter__(self):
+ def __iter__(self) -> Iterator[CoveragePlugin]:
return iter(self.order)
- def get(self, plugin_name):
+ def get(self, plugin_name: str) -> CoveragePlugin:
"""Return a plugin by name."""
return self.names[plugin_name]
@@ -124,20 +140,20 @@ def get(self, plugin_name):
class LabelledDebug:
"""A Debug writer, but with labels for prepending to the messages."""
- def __init__(self, label, debug, prev_labels=()):
+ def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()):
self.labels = list(prev_labels) + [label]
self.debug = debug
- def add_label(self, label):
+ def add_label(self, label: str) -> LabelledDebug:
"""Add a label to the writer, and return a new `LabelledDebug`."""
return LabelledDebug(label, self.debug, self.labels)
- def message_prefix(self):
+ def message_prefix(self) -> str:
"""The prefix to use on messages, combining the labels."""
- prefixes = self.labels + ['']
+ prefixes = self.labels + [""]
return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
- def write(self, message):
+ def write(self, message: str) -> None:
"""Write `message`, but with the labels prepended."""
self.debug.write(f"{self.message_prefix()}{message}")
@@ -145,12 +161,12 @@ def write(self, message):
class DebugPluginWrapper(CoveragePlugin):
"""Wrap a plugin, and use debug to report on what it's doing."""
- def __init__(self, plugin, debug):
+ def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None:
super().__init__()
self.plugin = plugin
self.debug = debug
- def file_tracer(self, filename):
+ def file_tracer(self, filename: str) -> Optional[FileTracer]:
tracer = self.plugin.file_tracer(filename)
self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
if tracer:
@@ -158,64 +174,65 @@ def file_tracer(self, filename):
tracer = DebugFileTracerWrapper(tracer, debug)
return tracer
- def file_reporter(self, filename):
+ def file_reporter(self, filename: str) -> Union[FileReporter, str]:
reporter = self.plugin.file_reporter(filename)
+ assert isinstance(reporter, FileReporter)
self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
if reporter:
debug = self.debug.add_label(f"file {filename!r}")
reporter = DebugFileReporterWrapper(filename, reporter, debug)
return reporter
- def dynamic_context(self, frame):
+ def dynamic_context(self, frame: FrameType) -> Optional[str]:
context = self.plugin.dynamic_context(frame)
self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
return context
- def find_executable_files(self, src_dir):
+ def find_executable_files(self, src_dir: str) -> Iterable[str]:
executable_files = self.plugin.find_executable_files(src_dir)
self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}")
return executable_files
- def configure(self, config):
+ def configure(self, config: TConfigurable) -> None:
self.debug.write(f"configure({config!r})")
self.plugin.configure(config)
- def sys_info(self):
+ def sys_info(self) -> Iterable[Tuple[str, Any]]:
return self.plugin.sys_info()
class DebugFileTracerWrapper(FileTracer):
"""A debugging `FileTracer`."""
- def __init__(self, tracer, debug):
+ def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None:
self.tracer = tracer
self.debug = debug
- def _show_frame(self, frame):
+ def _show_frame(self, frame: FrameType) -> str:
"""A short string identifying a frame, for debug messages."""
return "%s@%d" % (
os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
)
- def source_filename(self):
+ def source_filename(self) -> str:
sfilename = self.tracer.source_filename()
self.debug.write(f"source_filename() --> {sfilename!r}")
return sfilename
- def has_dynamic_source_filename(self):
+ def has_dynamic_source_filename(self) -> bool:
has = self.tracer.has_dynamic_source_filename()
self.debug.write(f"has_dynamic_source_filename() --> {has!r}")
return has
- def dynamic_source_filename(self, filename, frame):
+ def dynamic_source_filename(self, filename: str, frame: FrameType) -> Optional[str]:
dyn = self.tracer.dynamic_source_filename(filename, frame)
self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format(
filename, self._show_frame(frame), dyn,
))
return dyn
- def line_number_range(self, frame):
+ def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
pair = self.tracer.line_number_range(frame)
self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}")
return pair
@@ -224,57 +241,57 @@ def line_number_range(self, frame):
class DebugFileReporterWrapper(FileReporter):
"""A debugging `FileReporter`."""
- def __init__(self, filename, reporter, debug):
+ def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None:
super().__init__(filename)
self.reporter = reporter
self.debug = debug
- def relative_filename(self):
+ def relative_filename(self) -> str:
ret = self.reporter.relative_filename()
self.debug.write(f"relative_filename() --> {ret!r}")
return ret
- def lines(self):
+ def lines(self) -> Set[TLineNo]:
ret = self.reporter.lines()
self.debug.write(f"lines() --> {ret!r}")
return ret
- def excluded_lines(self):
+ def excluded_lines(self) -> Set[TLineNo]:
ret = self.reporter.excluded_lines()
self.debug.write(f"excluded_lines() --> {ret!r}")
return ret
- def translate_lines(self, lines):
+ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
ret = self.reporter.translate_lines(lines)
self.debug.write(f"translate_lines({lines!r}) --> {ret!r}")
return ret
- def translate_arcs(self, arcs):
+ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
ret = self.reporter.translate_arcs(arcs)
self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}")
return ret
- def no_branch_lines(self):
+ def no_branch_lines(self) -> Set[TLineNo]:
ret = self.reporter.no_branch_lines()
self.debug.write(f"no_branch_lines() --> {ret!r}")
return ret
- def exit_counts(self):
+ def exit_counts(self) -> Dict[TLineNo, int]:
ret = self.reporter.exit_counts()
self.debug.write(f"exit_counts() --> {ret!r}")
return ret
- def arcs(self):
+ def arcs(self) -> Set[TArc]:
ret = self.reporter.arcs()
self.debug.write(f"arcs() --> {ret!r}")
return ret
- def source(self):
+ def source(self) -> str:
ret = self.reporter.source()
self.debug.write("source() --> %d chars" % (len(ret),))
return ret
- def source_token_lines(self):
+ def source_token_lines(self) -> TSourceTokenLines:
ret = list(self.reporter.source_token_lines())
self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
return ret
diff --git a/coverage/py.typed b/coverage/py.typed
new file mode 100644
index 000000000..bacd23a18
--- /dev/null
+++ b/coverage/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561 to indicate that this package has type hints.
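PEP 561 only takes effect if this marker is actually shipped in the built distribution. A generic setuptools sketch of the mechanism (not coverage.py's own packaging configuration, just an illustration):

    from setuptools import setup

    setup(
        name="example-typed-package",
        packages=["example"],
        # Include the PEP 561 marker so type checkers treat the installed package as typed.
        package_data={"example": ["py.typed"]},
        zip_safe=False,
    )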
diff --git a/coverage/python.py b/coverage/python.py
index da43e6e8b..3deb6819f 100644
--- a/coverage/python.py
+++ b/coverage/python.py
@@ -3,23 +3,30 @@
"""Python source expertise for coverage.py"""
+from __future__ import annotations
+
import os.path
import types
import zipimport
+from typing import Dict, Iterable, Optional, Set, TYPE_CHECKING
+
from coverage import env
from coverage.exceptions import CoverageException, NoSource
-from coverage.files import canonical_filename, relative_filename
-from coverage.misc import contract, expensive, isolate_module, join_regex
+from coverage.files import canonical_filename, relative_filename, zip_location
+from coverage.misc import expensive, isolate_module, join_regex
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
+from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines
+
+if TYPE_CHECKING:
+ from coverage import Coverage
os = isolate_module(os)
-@contract(returns='bytes')
-def read_python_source(filename):
+def read_python_source(filename: str) -> bytes:
"""Read the Python source text from `filename`.
Returns bytes.
@@ -28,15 +35,10 @@ def read_python_source(filename):
with open(filename, "rb") as f:
source = f.read()
- if env.IRONPYTHON:
- # IronPython reads Unicode strings even for "rb" files.
- source = bytes(source)
-
return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
-@contract(returns='unicode')
-def get_python_source(filename):
+def get_python_source(filename: str) -> str:
"""Return the source code, as unicode."""
base, ext = os.path.splitext(filename)
if ext == ".py" and env.WINDOWS:
@@ -44,34 +46,34 @@ def get_python_source(filename):
else:
exts = [ext]
+ source_bytes: Optional[bytes]
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
# A regular text file: open it.
- source = read_python_source(try_filename)
+ source_bytes = read_python_source(try_filename)
break
# Maybe it's in a zip file?
- source = get_zip_bytes(try_filename)
- if source is not None:
+ source_bytes = get_zip_bytes(try_filename)
+ if source_bytes is not None:
break
else:
# Couldn't find source.
raise NoSource(f"No source for code: '{filename}'.")
# Replace \f because of http://bugs.python.org/issue19035
- source = source.replace(b'\f', b' ')
- source = source.decode(source_encoding(source), "replace")
+ source_bytes = source_bytes.replace(b"\f", b" ")
+ source = source_bytes.decode(source_encoding(source_bytes), "replace")
# Python code should always end with a line with a newline.
- if source and source[-1] != '\n':
- source += '\n'
+ if source and source[-1] != "\n":
+ source += "\n"
return source
-@contract(returns='bytes|None')
-def get_zip_bytes(filename):
+def get_zip_bytes(filename: str) -> Optional[bytes]:
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
@@ -79,23 +81,22 @@ def get_zip_bytes(filename):
an empty string if the file is empty.
"""
- markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep]
- for marker in markers:
- if marker in filename:
- parts = filename.split(marker)
- try:
- zi = zipimport.zipimporter(parts[0]+marker[:-1])
- except zipimport.ZipImportError:
- continue
- try:
- data = zi.get_data(parts[1])
- except OSError:
- continue
- return data
+ zipfile_inner = zip_location(filename)
+ if zipfile_inner is not None:
+ zipfile, inner = zipfile_inner
+ try:
+ zi = zipimport.zipimporter(zipfile)
+ except zipimport.ZipImportError:
+ return None
+ try:
+ data = zi.get_data(inner)
+ except OSError:
+ return None
+ return data
return None
-def source_for_file(filename):
+def source_for_file(filename: str) -> str:
"""Return the source filename for `filename`.
Given a file name being traced, return the best guess as to the source
@@ -120,17 +121,13 @@ def source_for_file(filename):
# Didn't find source, but it's probably the .py file we want.
return py_filename
- elif filename.endswith("$py.class"):
- # Jython is easy to guess.
- return filename[:-9] + ".py"
-
# No idea, just use the file name as-is.
return filename
-def source_for_morf(morf):
+def source_for_morf(morf: TMorf) -> str:
"""Get the source filename for the module-or-file `morf`."""
- if hasattr(morf, '__file__') and morf.__file__:
+ if hasattr(morf, "__file__") and morf.__file__:
filename = morf.__file__
elif isinstance(morf, types.ModuleType):
# A module should have had .__file__, otherwise we can't use it.
@@ -146,60 +143,68 @@ def source_for_morf(morf):
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
- def __init__(self, morf, coverage=None):
+ def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
self.coverage = coverage
filename = source_for_morf(morf)
- super().__init__(canonical_filename(filename))
+ fname = filename
+ canonicalize = True
+ if self.coverage is not None:
+ if self.coverage.config.relative_files:
+ canonicalize = False
+ if canonicalize:
+ fname = canonical_filename(filename)
+ super().__init__(fname)
- if hasattr(morf, '__name__'):
+ if hasattr(morf, "__name__"):
name = morf.__name__.replace(".", os.sep)
- if os.path.basename(filename).startswith('__init__.'):
+ if os.path.basename(filename).startswith("__init__."):
name += os.sep + "__init__"
name += ".py"
else:
name = relative_filename(filename)
self.relname = name
- self._source = None
- self._parser = None
+ self._source: Optional[str] = None
+ self._parser: Optional[PythonParser] = None
self._excluded = None
- def __repr__(self):
+ def __repr__(self) -> str:
return f""
- @contract(returns='unicode')
- def relative_filename(self):
+ def relative_filename(self) -> str:
return self.relname
@property
- def parser(self):
+ def parser(self) -> PythonParser:
"""Lazily create a :class:`PythonParser`."""
+ assert self.coverage is not None
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
- exclude=self.coverage._exclude_regex('exclude'),
+ exclude=self.coverage._exclude_regex("exclude"),
)
self._parser.parse_source()
return self._parser
- def lines(self):
+ def lines(self) -> Set[TLineNo]:
"""Return the line numbers of statements in the file."""
return self.parser.statements
- def excluded_lines(self):
+ def excluded_lines(self) -> Set[TLineNo]:
"""Return the line numbers of statements in the file."""
return self.parser.excluded
- def translate_lines(self, lines):
+ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
return self.parser.translate_lines(lines)
- def translate_arcs(self, arcs):
+ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
return self.parser.translate_arcs(arcs)
@expensive
- def no_branch_lines(self):
+ def no_branch_lines(self) -> Set[TLineNo]:
+ assert self.coverage is not None
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list),
@@ -207,23 +212,27 @@ def no_branch_lines(self):
return no_branch
@expensive
- def arcs(self):
+ def arcs(self) -> Set[TArc]:
return self.parser.arcs()
@expensive
- def exit_counts(self):
+ def exit_counts(self) -> Dict[TLineNo, int]:
return self.parser.exit_counts()
- def missing_arc_description(self, start, end, executed_arcs=None):
+ def missing_arc_description(
+ self,
+ start: TLineNo,
+ end: TLineNo,
+ executed_arcs: Optional[Iterable[TArc]] = None,
+ ) -> str:
return self.parser.missing_arc_description(start, end, executed_arcs)
- @contract(returns='unicode')
- def source(self):
+ def source(self) -> str:
if self._source is None:
self._source = get_python_source(self.filename)
return self._source
- def should_be_python(self):
+ def should_be_python(self) -> bool:
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
@@ -235,7 +244,7 @@ def should_be_python(self):
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
- if ext.startswith('.py'):
+ if ext.startswith(".py"):
return True
# A file with no extension should be Python.
if not ext:
@@ -243,5 +252,5 @@ def should_be_python(self):
# Everything else is probably not Python.
return False
- def source_token_lines(self):
+ def source_token_lines(self) -> TSourceTokenLines:
return source_token_lines(self.source())
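The zip handling rewritten above delegates path splitting to `zip_location`, newly imported from `coverage.files` earlier in this diff; it turns a path that runs through a .zip/.egg/.pex archive into an (archive, inner path) pair, or None. A rough usage sketch with an invented path:

    from coverage.files import zip_location

    loc = zip_location("/site-packages/bundle.zip/pkg/mod.py")
    if loc is not None:
        archive, inner = loc
        # Roughly: archive -> ".../bundle.zip", inner -> "pkg/mod.py"
        print(archive, inner)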
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
index 4f138074b..81832b0fd 100644
--- a/coverage/pytracer.py
+++ b/coverage/pytracer.py
@@ -3,18 +3,28 @@
"""Raw data collector for coverage.py."""
+from __future__ import annotations
+
import atexit
import dis
import sys
+import threading
+
+from types import FrameType, ModuleType
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast
from coverage import env
+from coverage.types import (
+ TArc, TFileDisposition, TLineNo, TTraceData, TTraceFileData, TTraceFn,
+ TTracer, TWarnFn,
+)
# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
-RESUME = dis.opmap.get('RESUME')
-RETURN_VALUE = dis.opmap['RETURN_VALUE']
+RESUME = dis.opmap.get("RESUME")
+RETURN_VALUE = dis.opmap["RETURN_VALUE"]
if RESUME is None:
- YIELD_VALUE = dis.opmap['YIELD_VALUE']
- YIELD_FROM = dis.opmap['YIELD_FROM']
+ YIELD_VALUE = dis.opmap["YIELD_VALUE"]
+ YIELD_FROM = dis.opmap["YIELD_FROM"]
YIELD_FROM_OFFSET = 0 if env.PYPY else 2
# When running meta-coverage, this file can try to trace itself, which confuses
@@ -22,7 +32,7 @@
THIS_FILE = __file__.rstrip("co")
-class PyTracer:
+class PyTracer(TTracer):
"""Python implementation of the raw data tracer."""
# Because of poor implementations of trace-function-manipulating tools,
@@ -41,44 +51,46 @@ class PyTracer:
# PyTracer to get accurate results. The command-line --timid argument is
# used to force the use of this tracer.
- def __init__(self):
+ def __init__(self) -> None:
+ # pylint: disable=super-init-not-called
# Attributes set from the collector:
- self.data = None
+ self.data: TTraceData
self.trace_arcs = False
- self.should_trace = None
- self.should_trace_cache = None
- self.should_start_context = None
- self.warn = None
+ self.should_trace: Callable[[str, FrameType], TFileDisposition]
+ self.should_trace_cache: Dict[str, Optional[TFileDisposition]]
+ self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None
+ self.switch_context: Optional[Callable[[Optional[str]], None]] = None
+ self.warn: TWarnFn
+
# The threading module to use, if any.
- self.threading = None
+ self.threading: Optional[ModuleType] = None
- self.cur_file_data = None
- self.last_line = 0 # int, but uninitialized.
- self.cur_file_name = None
- self.context = None
+ self.cur_file_data: Optional[TTraceFileData] = None
+ self.last_line: TLineNo = 0
+ self.cur_file_name: Optional[str] = None
+ self.context: Optional[str] = None
self.started_context = False
- self.data_stack = []
- self.thread = None
+ self.data_stack: List[Tuple[Optional[TTraceFileData], Optional[str], TLineNo, bool]] = []
+ self.thread: Optional[threading.Thread] = None
self.stopped = False
self._activity = False
self.in_atexit = False
# On exit, self.in_atexit = True
- atexit.register(setattr, self, 'in_atexit', True)
+ atexit.register(setattr, self, "in_atexit", True)
# Cache a bound method on the instance, so that we don't have to
# re-create a bound method object all the time.
- self._cached_bound_method_trace = self._trace
+ self._cached_bound_method_trace: TTraceFn = self._trace
- def __repr__(self):
- return "".format(
- id(self),
- sum(len(v) for v in self.data.values()),
- len(self.data),
- )
+ def __repr__(self) -> str:
+ me = id(self)
+ points = sum(len(v) for v in self.data.values())
+ files = len(self.data)
+ return f""
- def log(self, marker, *args):
+ def log(self, marker: str, *args: Any) -> None:
"""For hard-core logging of what this tracer is doing."""
with open("/tmp/debug_trace.txt", "a") as f:
f.write("{} {}[{}]".format(
@@ -87,13 +99,13 @@ def log(self, marker, *args):
len(self.data_stack),
))
if 0: # if you want thread ids..
- f.write(".{:x}.{:x}".format(
+ f.write(".{:x}.{:x}".format( # type: ignore[unreachable]
self.thread.ident,
self.threading.current_thread().ident,
))
f.write(" {}".format(" ".join(map(str, args))))
if 0: # if you want callers..
- f.write(" | ")
+ f.write(" | ") # type: ignore[unreachable]
stack = " / ".join(
(fname or "???").rpartition("/")[-1]
for _, fname, _, _ in self.data_stack
@@ -101,7 +113,13 @@ def log(self, marker, *args):
f.write(stack)
f.write("\n")
- def _trace(self, frame, event, arg_unused):
+ def _trace(
+ self,
+ frame: FrameType,
+ event: str,
+ arg: Any, # pylint: disable=unused-argument
+ lineno: Optional[TLineNo] = None, # pylint: disable=unused-argument
+ ) -> Optional[TTraceFn]:
"""The trace function passed to sys.settrace."""
if THIS_FILE in frame.f_code.co_filename:
@@ -113,27 +131,36 @@ def _trace(self, frame, event, arg_unused):
# The PyTrace.stop() method has been called, possibly by another
# thread, let's deactivate ourselves now.
if 0:
- self.log("---\nX", frame.f_code.co_filename, frame.f_lineno)
- f = frame
+ f = frame # type: ignore[unreachable]
+ self.log("---\nX", f.f_code.co_filename, f.f_lineno)
while f:
self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
f = f.f_back
sys.settrace(None)
- self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
+ try:
+ self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
+ self.data_stack.pop()
+ )
+ except IndexError:
+ self.log(
+ "Empty stack!",
+ frame.f_code.co_filename,
+ frame.f_lineno,
+ frame.f_code.co_name
+ )
return None
- # if event != 'call' and frame.f_code.co_filename != self.cur_file_name:
+ # if event != "call" and frame.f_code.co_filename != self.cur_file_name:
# self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
- if event == 'call':
+ if event == "call":
# Should we start a new context?
if self.should_start_context and self.context is None:
context_maybe = self.should_start_context(frame)
if context_maybe is not None:
self.context = context_maybe
started_context = True
+ assert self.switch_context is not None
self.switch_context(self.context)
else:
started_context = False
@@ -155,7 +182,7 @@ def _trace(self, frame, event, arg_unused):
# Improve tracing performance: when calling a function, both caller
# and callee are often within the same file. if that's the case, we
# don't have to re-check whether to trace the corresponding
- # function (which is a little bit espensive since it involves
+ # function (which is a little bit expensive since it involves
# dictionary lookups). This optimization is only correct if we
# didn't start a context.
filename = frame.f_code.co_filename
@@ -169,8 +196,9 @@ def _trace(self, frame, event, arg_unused):
self.cur_file_data = None
if disp.trace:
tracename = disp.source_filename
+ assert tracename is not None
if tracename not in self.data:
- self.data[tracename] = set()
+ self.data[tracename] = set() # type: ignore[assignment]
self.cur_file_data = self.data[tracename]
else:
frame.f_trace_lines = False
@@ -187,24 +215,24 @@ def _trace(self, frame, event, arg_unused):
oparg = frame.f_code.co_code[frame.f_lasti + 1]
real_call = (oparg == 0)
else:
- real_call = (getattr(frame, 'f_lasti', -1) < 0)
+ real_call = (getattr(frame, "f_lasti", -1) < 0)
if real_call:
self.last_line = -frame.f_code.co_firstlineno
else:
self.last_line = frame.f_lineno
- elif event == 'line':
+ elif event == "line":
# Record an executed line.
if self.cur_file_data is not None:
- lineno = frame.f_lineno
+ flineno: TLineNo = frame.f_lineno
if self.trace_arcs:
- self.cur_file_data.add((self.last_line, lineno))
+ cast(Set[TArc], self.cur_file_data).add((self.last_line, flineno))
else:
- self.cur_file_data.add(lineno)
- self.last_line = lineno
+ cast(Set[TLineNo], self.cur_file_data).add(flineno)
+ self.last_line = flineno
- elif event == 'return':
+ elif event == "return":
if self.trace_arcs and self.cur_file_data:
# Record an arc leaving the function, but beware that a
# "return" event might just mean yielding from a generator.
@@ -230,7 +258,7 @@ def _trace(self, frame, event, arg_unused):
real_return = True
if real_return:
first = frame.f_code.co_firstlineno
- self.cur_file_data.add((self.last_line, -first))
+ cast(Set[TArc], self.cur_file_data).add((self.last_line, -first))
# Leaving this function, pop the filename stack.
self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
@@ -238,11 +266,12 @@ def _trace(self, frame, event, arg_unused):
)
# Leaving a context?
if self.started_context:
+ assert self.switch_context is not None
self.context = None
self.switch_context(None)
return self._cached_bound_method_trace
- def start(self):
+ def start(self) -> TTraceFn:
"""Start this Tracer.
Return a Python function suitable for use with sys.settrace().
@@ -263,7 +292,7 @@ def start(self):
sys.settrace(self._cached_bound_method_trace)
return self._cached_bound_method_trace
- def stop(self):
+ def stop(self) -> None:
"""Stop this Tracer."""
# Get the active tracer callback before setting the stop flag to be
# able to detect if the tracer was changed prior to stopping it.
@@ -274,12 +303,14 @@ def stop(self):
# right thread.
self.stopped = True
- if self.threading and self.thread.ident != self.threading.current_thread().ident:
- # Called on a different thread than started us: we can't unhook
- # ourselves, but we've set the flag that we should stop, so we
- # won't do any more tracing.
- #self.log("~", "stopping on different threads")
- return
+ if self.threading:
+ assert self.thread is not None
+ if self.thread.ident != self.threading.current_thread().ident:
+ # Called on a different thread than started us: we can't unhook
+ # ourselves, but we've set the flag that we should stop, so we
+ # won't do any more tracing.
+ #self.log("~", "stopping on different threads")
+ return
if self.warn:
# PyPy clears the trace function before running atexit functions,
@@ -293,14 +324,14 @@ def stop(self):
slug="trace-changed",
)
- def activity(self):
+ def activity(self) -> bool:
"""Has there been any activity?"""
return self._activity
- def reset_activity(self):
+ def reset_activity(self) -> None:
"""Reset the activity() flag."""
self._activity = False
- def get_stats(self):
+ def get_stats(self) -> Optional[Dict[str, int]]:
"""Return a dictionary of statistics, or None."""
return None
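
For orientation, a small sketch (not part of the patch) of the arc convention the tracer above uses: call and return events are recorded against the negative of the function's first line number, matching the `-frame.f_code.co_firstlineno` and `(self.last_line, -first)` lines.

# Hypothetical three-line source file:
#   1  def f():
#   2      a = 1
#   3      return a

# Arcs the tracer would record for one call of f(), written out by hand:
arcs = {
    (-1, 2),   # entering f(): from -firstlineno to the first executed line
    (2, 3),    # ordinary flow from line 2 to line 3
    (3, -1),   # returning from f(): from the last line back to -firstlineno
}
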
diff --git a/coverage/report.py b/coverage/report.py
index 6382eb515..e1c7a071d 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -1,91 +1,281 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-"""Reporter foundation for coverage.py."""
+"""Summary reporting"""
+
+from __future__ import annotations
import sys
-from coverage.exceptions import CoverageException, NoDataError, NotPython
-from coverage.files import prep_patterns, FnmatchMatcher
-from coverage.misc import ensure_dir_for_file, file_be_gone
-
-
-def render_report(output_path, reporter, morfs, msgfn):
- """Run a one-file report generator, managing the output file.
-
- This function ensures the output file is ready to be written to. Then writes
- the report to it. Then closes the file and cleans up.
-
- """
- file_to_close = None
- delete_file = False
-
- if output_path == "-":
- outfile = sys.stdout
- else:
- # Ensure that the output directory is created; done here
- # because this report pre-opens the output file.
- # HTMLReport does this using the Report plumbing because
- # its task is more complex, being multiple files.
- ensure_dir_for_file(output_path)
- outfile = open(output_path, "w", encoding="utf-8")
- file_to_close = outfile
-
- try:
- return reporter.report(morfs, outfile=outfile)
- except CoverageException:
- delete_file = True
- raise
- finally:
- if file_to_close:
- file_to_close.close()
- if delete_file:
- file_be_gone(output_path) # pragma: part covered (doesn't return)
- else:
- msgfn(f"Wrote {reporter.report_type} to {output_path}")
-
-
-def get_analysis_to_report(coverage, morfs):
- """Get the files to report on.
-
- For each morf in `morfs`, if it should be reported on (based on the omit
- and include configuration options), yield a pair, the `FileReporter` and
- `Analysis` for the morf.
-
- """
- file_reporters = coverage._get_file_reporters(morfs)
- config = coverage.config
-
- if config.report_include:
- matcher = FnmatchMatcher(prep_patterns(config.report_include), "report_include")
- file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
-
- if config.report_omit:
- matcher = FnmatchMatcher(prep_patterns(config.report_omit), "report_omit")
- file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
-
- if not file_reporters:
- raise NoDataError("No data to report.")
-
- for fr in sorted(file_reporters):
- try:
- analysis = coverage._analyze(fr)
- except NotPython:
- # Only report errors for .py files, and only if we didn't
- # explicitly suppress those errors.
- # NotPython is only raised by PythonFileReporter, which has a
- # should_be_python() method.
- if fr.should_be_python():
- if config.ignore_errors:
- msg = f"Couldn't parse Python file '{fr.filename}'"
- coverage._warn(msg, slug="couldnt-parse")
- else:
- raise
- except Exception as exc:
- if config.ignore_errors:
- msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
- coverage._warn(msg, slug="couldnt-parse")
+from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
+
+from coverage.exceptions import ConfigError, NoDataError
+from coverage.misc import human_sorted_items
+from coverage.plugin import FileReporter
+from coverage.report_core import get_analysis_to_report
+from coverage.results import Analysis, Numbers
+from coverage.types import TMorf
+
+if TYPE_CHECKING:
+ from coverage import Coverage
+
+
+class SummaryReporter:
+ """A reporter for writing the summary report."""
+
+ def __init__(self, coverage: Coverage) -> None:
+ self.coverage = coverage
+ self.config = self.coverage.config
+ self.branches = coverage.get_data().has_arcs()
+ self.outfile: Optional[IO[str]] = None
+ self.output_format = self.config.format or "text"
+ if self.output_format not in {"text", "markdown", "total"}:
+ raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
+ self.fr_analysis: List[Tuple[FileReporter, Analysis]] = []
+ self.skipped_count = 0
+ self.empty_count = 0
+ self.total = Numbers(precision=self.config.precision)
+
+ def write(self, line: str) -> None:
+ """Write a line to the output, adding a newline."""
+ assert self.outfile is not None
+ self.outfile.write(line.rstrip())
+ self.outfile.write("\n")
+
+ def write_items(self, items: Iterable[str]) -> None:
+ """Write a list of strings, joined together."""
+ self.write("".join(items))
+
+ def _report_text(
+ self,
+ header: List[str],
+ lines_values: List[List[Any]],
+ total_line: List[Any],
+ end_lines: List[str],
+ ) -> None:
+ """Internal method that prints report data in text format.
+
+ `header` is a list with captions.
+ `lines_values` is a list of lists of sortable values.
+ `total_line` is a list with values of the total line.
+ `end_lines` is a list of ending lines with information about skipped files.
+
+ """
+ # Prepare the formatting strings, header, and column sorting.
+ max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
+ max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
+ max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
+ formats = dict(
+ Name="{:{name_len}}",
+ Stmts="{:>7}",
+ Miss="{:>7}",
+ Branch="{:>7}",
+ BrPart="{:>7}",
+ Cover="{:>{n}}",
+ Missing="{:>10}",
+ )
+ header_items = [
+ formats[item].format(item, name_len=max_name, n=max_n)
+ for item in header
+ ]
+ header_str = "".join(header_items)
+ rule = "-" * len(header_str)
+
+ # Write the header
+ self.write(header_str)
+ self.write(rule)
+
+ formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
+ for values in lines_values:
+ # build string with line values
+ line_items = [
+ formats[item].format(str(value),
+ name_len=max_name, n=max_n-1) for item, value in zip(header, values)
+ ]
+ self.write_items(line_items)
+
+ # Write a TOTAL line
+ if lines_values:
+ self.write(rule)
+
+ line_items = [
+ formats[item].format(str(value),
+ name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
+ ]
+ self.write_items(line_items)
+
+ for end_line in end_lines:
+ self.write(end_line)
+
+ def _report_markdown(
+ self,
+ header: List[str],
+ lines_values: List[List[Any]],
+ total_line: List[Any],
+ end_lines: List[str],
+ ) -> None:
+ """Internal method that prints report data in markdown format.
+
+ `header` is a list with captions.
+ `lines_values` is a sorted list of lists containing coverage information.
+ `total_line` is a list with values of the total line.
+ `end_lines` is a list of ending lines with information about skipped files.
+
+ """
+ # Prepare the formatting strings, header, and column sorting.
+ max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
+ max_name = max(max_name, len("**TOTAL**")) + 1
+ formats = dict(
+ Name="| {:{name_len}}|",
+ Stmts="{:>9} |",
+ Miss="{:>9} |",
+ Branch="{:>9} |",
+ BrPart="{:>9} |",
+ Cover="{:>{n}} |",
+ Missing="{:>10} |",
+ )
+ max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
+ header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
+ header_str = "".join(header_items)
+ rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
+ ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]]
+ )
+
+ # Write the header
+ self.write(header_str)
+ self.write(rule_str)
+
+ for values in lines_values:
+ # build string with line values
+ formats.update(dict(Cover="{:>{n}}% |"))
+ line_items = [
+ formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
+ for item, value in zip(header, values)
+ ]
+ self.write_items(line_items)
+
+ # Write the TOTAL line
+ formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
+ total_line_items: List[str] = []
+ for item, value in zip(header, total_line):
+ if value == "":
+ insert = value
+ elif item == "Cover":
+ insert = f" **{value}%**"
else:
- raise
+ insert = f" **{value}**"
+ total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
+ self.write_items(total_line_items)
+ for end_line in end_lines:
+ self.write(end_line)
+
+ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
+ """Writes a report summarizing coverage statistics per module.
+
+ `outfile` is a text-mode file object to write the summary to.
+
+ """
+ self.outfile = outfile or sys.stdout
+
+ self.coverage.get_data().set_query_contexts(self.config.report_contexts)
+ for fr, analysis in get_analysis_to_report(self.coverage, morfs):
+ self.report_one_file(fr, analysis)
+
+ if not self.total.n_files and not self.skipped_count:
+ raise NoDataError("No data to report.")
+
+ if self.output_format == "total":
+ self.write(self.total.pc_covered_str)
+ else:
+ self.tabular_report()
+
+ return self.total.pc_covered
+
+ def tabular_report(self) -> None:
+ """Writes tabular report formats."""
+ # Prepare the header line and column sorting.
+ header = ["Name", "Stmts", "Miss"]
+ if self.branches:
+ header += ["Branch", "BrPart"]
+ header += ["Cover"]
+ if self.config.show_missing:
+ header += ["Missing"]
+
+ column_order = dict(name=0, stmts=1, miss=2, cover=-1)
+ if self.branches:
+ column_order.update(dict(branch=3, brpart=4))
+
+ # `lines_values` is a list of lists of sortable values.
+ lines_values = []
+
+ for (fr, analysis) in self.fr_analysis:
+ nums = analysis.numbers
+
+ args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
+ if self.branches:
+ args += [nums.n_branches, nums.n_partial_branches]
+ args += [nums.pc_covered_str]
+ if self.config.show_missing:
+ args += [analysis.missing_formatted(branches=True)]
+ args += [nums.pc_covered]
+ lines_values.append(args)
+
+ # Line sorting.
+ sort_option = (self.config.sort or "name").lower()
+ reverse = False
+ if sort_option[0] == "-":
+ reverse = True
+ sort_option = sort_option[1:]
+ elif sort_option[0] == "+":
+ sort_option = sort_option[1:]
+ sort_idx = column_order.get(sort_option)
+ if sort_idx is None:
+ raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
+ if sort_option == "name":
+ lines_values = human_sorted_items(lines_values, reverse=reverse)
+ else:
+ lines_values.sort(
+ key=lambda line: (line[sort_idx], line[0]), # type: ignore[index]
+ reverse=reverse,
+ )
+
+ # Calculate total if we had at least one file.
+ total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
+ if self.branches:
+ total_line += [self.total.n_branches, self.total.n_partial_branches]
+ total_line += [self.total.pc_covered_str]
+ if self.config.show_missing:
+ total_line += [""]
+
+ # Create other final lines.
+ end_lines = []
+ if self.config.skip_covered and self.skipped_count:
+ file_suffix = "s" if self.skipped_count > 1 else ""
+ end_lines.append(
+ f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
+ )
+ if self.config.skip_empty and self.empty_count:
+ file_suffix = "s" if self.empty_count > 1 else ""
+ end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
+
+ if self.output_format == "markdown":
+ formatter = self._report_markdown
+ else:
+ formatter = self._report_text
+ formatter(header, lines_values, total_line, end_lines)
+
+ def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
+ """Report on just one file, the callback from report()."""
+ nums = analysis.numbers
+ self.total += nums
+
+ no_missing_lines = (nums.n_missing == 0)
+ no_missing_branches = (nums.n_partial_branches == 0)
+ if self.config.skip_covered and no_missing_lines and no_missing_branches:
+ # Don't report on 100% files.
+ self.skipped_count += 1
+ elif self.config.skip_empty and nums.n_statements == 0:
+ # Don't report on empty files.
+ self.empty_count += 1
else:
- yield (fr, analysis)
+ self.fr_analysis.append((fr, analysis))
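
As a rough illustration of the width-aware format strings `_report_text` builds above, here is a standalone sketch with made-up module names and numbers (not part of the patch):

header = ["Name", "Stmts", "Miss", "Cover"]
rows = [["mod_a.py", 20, 2, "90"], ["mod_b.py", 5, 0, "100"]]

# Same idea as the `formats` dict: pad "Name" to the longest name and
# right-align the numeric columns, computing the widths from the data.
name_len = max(len(str(r[0])) for r in rows + [header]) + 1
fmt = "{:{name_len}}{:>7}{:>7}{:>7}"

print(fmt.format(*header, name_len=name_len))
print("-" * (name_len + 21))
for name, stmts, miss, cover in rows:
    print(fmt.format(name, stmts, miss, cover + "%", name_len=name_len))
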
diff --git a/coverage/report_core.py b/coverage/report_core.py
new file mode 100644
index 000000000..09eed0a82
--- /dev/null
+++ b/coverage/report_core.py
@@ -0,0 +1,117 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Reporter foundation for coverage.py."""
+
+from __future__ import annotations
+
+import sys
+
+from typing import Callable, Iterable, Iterator, IO, Optional, Tuple, TYPE_CHECKING
+
+from coverage.exceptions import NoDataError, NotPython
+from coverage.files import prep_patterns, GlobMatcher
+from coverage.misc import ensure_dir_for_file, file_be_gone
+from coverage.plugin import FileReporter
+from coverage.results import Analysis
+from coverage.types import Protocol, TMorf
+
+if TYPE_CHECKING:
+ from coverage import Coverage
+
+
+class Reporter(Protocol):
+ """What we expect of reporters."""
+
+ report_type: str
+
+ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ """Generate a report of `morfs`, written to `outfile`."""
+
+
+def render_report(
+ output_path: str,
+ reporter: Reporter,
+ morfs: Optional[Iterable[TMorf]],
+ msgfn: Callable[[str], None],
+) -> float:
+ """Run a one-file report generator, managing the output file.
+
+ This function ensures the output file is ready to be written to. Then writes
+ the report to it. Then closes the file and cleans up.
+
+ """
+ file_to_close = None
+ delete_file = False
+
+ if output_path == "-":
+ outfile = sys.stdout
+ else:
+ # Ensure that the output directory is created; done here because this
+ # report pre-opens the output file. HtmlReporter does this on its own
+ # because its task is more complex, being multiple files.
+ ensure_dir_for_file(output_path)
+ outfile = open(output_path, "w", encoding="utf-8")
+ file_to_close = outfile
+ delete_file = True
+
+ try:
+ ret = reporter.report(morfs, outfile=outfile)
+ if file_to_close is not None:
+ msgfn(f"Wrote {reporter.report_type} to {output_path}")
+ delete_file = False
+ return ret
+ finally:
+ if file_to_close is not None:
+ file_to_close.close()
+ if delete_file:
+ file_be_gone(output_path) # pragma: part covered (doesn't return)
+
+
+def get_analysis_to_report(
+ coverage: Coverage,
+ morfs: Optional[Iterable[TMorf]],
+) -> Iterator[Tuple[FileReporter, Analysis]]:
+ """Get the files to report on.
+
+ For each morf in `morfs`, if it should be reported on (based on the omit
+ and include configuration options), yield a pair, the `FileReporter` and
+ `Analysis` for the morf.
+
+ """
+ file_reporters = coverage._get_file_reporters(morfs)
+ config = coverage.config
+
+ if config.report_include:
+ matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
+ file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
+
+ if config.report_omit:
+ matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
+ file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
+
+ if not file_reporters:
+ raise NoDataError("No data to report.")
+
+ for fr in sorted(file_reporters):
+ try:
+ analysis = coverage._analyze(fr)
+ except NotPython:
+ # Only report errors for .py files, and only if we didn't
+ # explicitly suppress those errors.
+ # NotPython is only raised by PythonFileReporter, which has a
+ # should_be_python() method.
+ if fr.should_be_python(): # type: ignore[attr-defined]
+ if config.ignore_errors:
+ msg = f"Couldn't parse Python file '{fr.filename}'"
+ coverage._warn(msg, slug="couldnt-parse")
+ else:
+ raise
+ except Exception as exc:
+ if config.ignore_errors:
+ msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
+ coverage._warn(msg, slug="couldnt-parse")
+ else:
+ raise
+ else:
+ yield (fr, analysis)
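
For context, a minimal sketch of an object satisfying the Reporter protocol above, suitable for passing to render_report; the class name and numbers are hypothetical, not part of coverage.py:

import sys
from typing import IO, Iterable, Optional

class TotalOnlyReporter:
    """A toy reporter: the protocol only needs `report_type` and `report()`."""
    report_type = "toy report"

    def report(self, morfs: Optional[Iterable[object]], outfile: IO[str]) -> float:
        outfile.write("TOTAL 87.5%\n")
        return 87.5

# render_report("-", TotalOnlyReporter(), None, print) writes to stdout and
# returns the total; with a real path, the file is opened, written, closed,
# and deleted again if the reporter raises.
print(TotalOnlyReporter().report(None, sys.stdout))
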
diff --git a/coverage/results.py b/coverage/results.py
index 79439fd9b..ea6dc207f 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -3,17 +3,32 @@
"""Results of coverage measurement."""
+from __future__ import annotations
+
import collections
-from coverage.debug import SimpleReprMixin
+from typing import Callable, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING
+
+from coverage.debug import AutoReprMixin
from coverage.exceptions import ConfigError
-from coverage.misc import contract, nice_pair
+from coverage.misc import nice_pair
+from coverage.types import TArc, TLineNo
+
+if TYPE_CHECKING:
+ from coverage.data import CoverageData
+ from coverage.plugin import FileReporter
class Analysis:
"""The results of analyzing a FileReporter."""
- def __init__(self, data, precision, file_reporter, file_mapper):
+ def __init__(
+ self,
+ data: CoverageData,
+ precision: int,
+ file_reporter: FileReporter,
+ file_mapper: Callable[[str], str],
+ ) -> None:
self.data = data
self.file_reporter = file_reporter
self.filename = file_mapper(self.file_reporter.filename)
@@ -21,6 +36,7 @@ def __init__(self, data, precision, file_reporter, file_mapper):
self.excluded = self.file_reporter.excluded_lines()
# Identify missing statements.
+ executed: Iterable[TLineNo]
executed = self.data.lines(self.filename) or []
executed = self.file_reporter.translate_lines(executed)
self.executed = executed
@@ -51,7 +67,7 @@ def __init__(self, data, precision, file_reporter, file_mapper):
n_missing_branches=n_missing_branches,
)
- def missing_formatted(self, branches=False):
+ def missing_formatted(self, branches: bool = False) -> str:
"""The missing line numbers, formatted nicely.
Returns a string like "1-2, 5-11, 13-14".
@@ -66,25 +82,23 @@ def missing_formatted(self, branches=False):
return format_lines(self.statements, self.missing, arcs=arcs)
- def has_arcs(self):
+ def has_arcs(self) -> bool:
"""Were arcs measured in this result?"""
return self.data.has_arcs()
- @contract(returns='list(tuple(int, int))')
- def arc_possibilities(self):
+ def arc_possibilities(self) -> List[TArc]:
"""Returns a sorted list of the arcs in the code."""
return self._arc_possibilities
- @contract(returns='list(tuple(int, int))')
- def arcs_executed(self):
+ def arcs_executed(self) -> List[TArc]:
"""Returns a sorted list of the arcs actually executed in the code."""
+ executed: Iterable[TArc]
executed = self.data.arcs(self.filename) or []
executed = self.file_reporter.translate_arcs(executed)
return sorted(executed)
- @contract(returns='list(tuple(int, int))')
- def arcs_missing(self):
- """Returns a sorted list of the unexecuted arcs in the code."""
+ def arcs_missing(self) -> List[TArc]:
+ """Returns a sorted list of the un-executed arcs in the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
missing = (
@@ -95,8 +109,7 @@ def arcs_missing(self):
)
return sorted(missing)
- @contract(returns='list(tuple(int, int))')
- def arcs_unpredicted(self):
+ def arcs_unpredicted(self) -> List[TArc]:
"""Returns a sorted list of the executed arcs missing from the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
@@ -113,16 +126,15 @@ def arcs_unpredicted(self):
)
return sorted(unpredicted)
- def _branch_lines(self):
+ def _branch_lines(self) -> List[TLineNo]:
"""Returns a list of line numbers that have more than one exit."""
return [l1 for l1,count in self.exit_counts.items() if count > 1]
- def _total_branches(self):
+ def _total_branches(self) -> int:
"""How many total branches are there?"""
return sum(count for count in self.exit_counts.values() if count > 1)
- @contract(returns='dict(int: list(int))')
- def missing_branch_arcs(self):
+ def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
"""Return arcs that weren't executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
@@ -136,8 +148,7 @@ def missing_branch_arcs(self):
mba[l1].append(l2)
return mba
- @contract(returns='dict(int: list(int))')
- def executed_branch_arcs(self):
+ def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
"""Return arcs that were executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
@@ -151,8 +162,7 @@ def executed_branch_arcs(self):
eba[l1].append(l2)
return eba
- @contract(returns='dict(int: tuple(int, int))')
- def branch_stats(self):
+ def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]:
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
@@ -168,7 +178,7 @@ def branch_stats(self):
return stats
-class Numbers(SimpleReprMixin):
+class Numbers(AutoReprMixin):
"""The numerical results of measuring coverage.
This holds the basic statistics from `Analysis`, and is used to roll
@@ -176,11 +186,17 @@ class Numbers(SimpleReprMixin):
"""
- def __init__(self,
- precision=0,
- n_files=0, n_statements=0, n_excluded=0, n_missing=0,
- n_branches=0, n_partial_branches=0, n_missing_branches=0
- ):
+ def __init__(
+ self,
+ precision: int = 0,
+ n_files: int = 0,
+ n_statements: int = 0,
+ n_excluded: int = 0,
+ n_missing: int = 0,
+ n_branches: int = 0,
+ n_partial_branches: int = 0,
+ n_missing_branches: int = 0,
+ ) -> None:
assert 0 <= precision < 10
self._precision = precision
self._near0 = 1.0 / 10**precision
@@ -193,7 +209,7 @@ def __init__(self,
self.n_partial_branches = n_partial_branches
self.n_missing_branches = n_missing_branches
- def init_args(self):
+ def init_args(self) -> List[int]:
"""Return a list for __init__(*args) to recreate this object."""
return [
self._precision,
@@ -202,17 +218,17 @@ def init_args(self):
]
@property
- def n_executed(self):
+ def n_executed(self) -> int:
"""Returns the number of executed statements."""
return self.n_statements - self.n_missing
@property
- def n_executed_branches(self):
+ def n_executed_branches(self) -> int:
"""Returns the number of executed branches."""
return self.n_branches - self.n_missing_branches
@property
- def pc_covered(self):
+ def pc_covered(self) -> float:
"""Returns a single percentage value for coverage."""
if self.n_statements > 0:
numerator, denominator = self.ratio_covered
@@ -222,7 +238,7 @@ def pc_covered(self):
return pc_cov
@property
- def pc_covered_str(self):
+ def pc_covered_str(self) -> str:
"""Returns the percent covered, as a string, without a percent sign.
Note that "0" is only returned when the value is truly zero, and "100"
@@ -232,7 +248,7 @@ def pc_covered_str(self):
"""
return self.display_covered(self.pc_covered)
- def display_covered(self, pc):
+ def display_covered(self, pc: float) -> str:
"""Return a displayable total percentage, as a string.
Note that "0" is only returned when the value is truly zero, and "100"
@@ -248,7 +264,7 @@ def display_covered(self, pc):
pc = round(pc, self._precision)
return "%.*f" % (self._precision, pc)
- def pc_str_width(self):
+ def pc_str_width(self) -> int:
"""How many characters wide can pc_covered_str be?"""
width = 3 # "100"
if self._precision > 0:
@@ -256,13 +272,13 @@ def pc_str_width(self):
return width
@property
- def ratio_covered(self):
+ def ratio_covered(self) -> Tuple[int, int]:
"""Return a numerator and denominator for the coverage ratio."""
numerator = self.n_executed + self.n_executed_branches
denominator = self.n_statements + self.n_branches
return numerator, denominator
- def __add__(self, other):
+ def __add__(self, other: Numbers) -> Numbers:
nums = Numbers(precision=self._precision)
nums.n_files = self.n_files + other.n_files
nums.n_statements = self.n_statements + other.n_statements
@@ -277,13 +293,16 @@ def __add__(self, other):
)
return nums
- def __radd__(self, other):
+ def __radd__(self, other: int) -> Numbers:
# Implementing 0+Numbers allows us to sum() a list of Numbers.
assert other == 0 # we only ever call it this way.
return self
-def _line_ranges(statements, lines):
+def _line_ranges(
+ statements: Iterable[TLineNo],
+ lines: Iterable[TLineNo],
+) -> List[Tuple[TLineNo, TLineNo]]:
"""Produce a list of ranges for `format_lines`."""
statements = sorted(statements)
lines = sorted(lines)
@@ -307,7 +326,11 @@ def _line_ranges(statements, lines):
return pairs
-def format_lines(statements, lines, arcs=None):
+def format_lines(
+ statements: Iterable[TLineNo],
+ lines: Iterable[TLineNo],
+ arcs: Optional[Iterable[Tuple[TLineNo, List[TLineNo]]]] = None,
+) -> str:
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
@@ -326,7 +349,7 @@ def format_lines(statements, lines, arcs=None):
"""
line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)]
- if arcs:
+ if arcs is not None:
line_exits = sorted(arcs)
for line, exits in line_exits:
for ex in sorted(exits):
@@ -334,12 +357,11 @@ def format_lines(statements, lines, arcs=None):
dest = (ex if ex > 0 else "exit")
line_items.append((line, f"{line}->{dest}"))
- ret = ', '.join(t[-1] for t in sorted(line_items))
+ ret = ", ".join(t[-1] for t in sorted(line_items))
return ret
-@contract(total='number', fail_under='number', precision=int, returns=bool)
-def should_fail_under(total, fail_under, precision):
+def should_fail_under(total: float, fail_under: float, precision: int) -> bool:
"""Determine if a total should fail due to fail-under.
`total` is a float, the coverage measurement total. `fail_under` is the
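
A small sketch (assuming coverage.py is importable) of why `__radd__` above only needs to accept 0: it lets a list of Numbers be combined with the builtin sum().

from coverage.results import Numbers

per_file = [
    Numbers(precision=1, n_files=1, n_statements=40, n_missing=4),
    Numbers(precision=1, n_files=1, n_statements=10, n_missing=0),
]
total = sum(per_file)            # sum() starts from 0, which __radd__ handles
print(total.n_files)             # 2
print(total.pc_covered_str)      # "92.0" -- 46 of 50 statements executed
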
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index 8d2ed73a5..42cf4501d 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -3,7 +3,10 @@
"""SQLite coverage data."""
+from __future__ import annotations
+
import collections
+import contextlib
import datetime
import functools
import glob
@@ -18,16 +21,22 @@
import threading
import zlib
-from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr
+from typing import (
+ cast, Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping,
+ Optional, Sequence, Set, Tuple, TypeVar, Union,
+)
+
+from coverage.debug import NoDebugging, AutoReprMixin, clipped_repr
from coverage.exceptions import CoverageException, DataError
from coverage.files import PathAliases
-from coverage.misc import contract, file_be_gone, isolate_module
+from coverage.misc import file_be_gone, isolate_module
from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits
+from coverage.types import FilePath, TArc, TDebugCtl, TLineNo, TWarnFn
from coverage.version import __version__
os = isolate_module(os)
-# If you change the schema, increment the SCHEMA_VERSION, and update the
+# If you change the schema: increment the SCHEMA_VERSION and update the
# docs in docs/dbschema.rst by running "make cogdoc".
SCHEMA_VERSION = 7
@@ -52,7 +61,7 @@
key text,
value text,
unique (key)
- -- Keys:
+ -- Possible keys:
-- 'has_arcs' boolean -- Is this data recording branches?
-- 'sys_argv' text -- The coverage command line that recorded the data.
-- 'version' text -- The version of coverage.py that made the file.
@@ -103,7 +112,22 @@
);
"""
-class CoverageData(SimpleReprMixin):
+TMethod = TypeVar("TMethod", bound=Callable[..., Any])
+
+def _locked(method: TMethod) -> TMethod:
+ """A decorator for methods that should hold self._lock."""
+ @functools.wraps(method)
+ def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any:
+ if self._debug.should("lock"):
+ self._debug.write(f"Locking {self._lock!r} for {method.__name__}")
+ with self._lock:
+ if self._debug.should("lock"):
+ self._debug.write(f"Locked {self._lock!r} for {method.__name__}")
+ return method(self, *args, **kwargs)
+ return _wrapped # type: ignore[return-value]
+
+
+class CoverageData(AutoReprMixin):
"""Manages collected coverage data, including file storage.
This class is the public supported API to the data that coverage.py
@@ -173,9 +197,11 @@ class CoverageData(SimpleReprMixin):
Write the data to its file with :meth:`write`.
- You can clear the data in memory with :meth:`erase`. Two data collections
- can be combined by using :meth:`update` on one :class:`CoverageData`,
- passing it the other.
+ You can clear the data in memory with :meth:`erase`. Data for specific
+ files can be removed from the database with :meth:`purge_files`.
+
+ Two data collections can be combined by using :meth:`update` on one
+ :class:`CoverageData`, passing it the other.
Data in a :class:`CoverageData` can be serialized and deserialized with
:meth:`dumps` and :meth:`loads`.
@@ -186,7 +212,14 @@ class CoverageData(SimpleReprMixin):
"""
- def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None):
+ def __init__(
+ self,
+ basename: Optional[FilePath] = None,
+ suffix: Optional[Union[str, bool]] = None,
+ no_disk: bool = False,
+ warn: Optional[TWarnFn] = None,
+ debug: Optional[TDebugCtl] = None,
+ ) -> None:
"""Create a :class:`CoverageData` object to hold coverage-measured data.
Arguments:
@@ -208,9 +241,10 @@ def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=N
self._debug = debug or NoDebugging()
self._choose_filename()
- self._file_map = {}
+ # Maps filenames to row ids.
+ self._file_map: Dict[str, int] = {}
# Maps thread ids to SqliteDb objects.
- self._dbs = {}
+ self._dbs: Dict[int, SqliteDb] = {}
self._pid = os.getpid()
# Synchronize the operations used during collection.
self._lock = threading.RLock()
@@ -221,24 +255,11 @@ def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=N
self._has_lines = False
self._has_arcs = False
- self._current_context = None
- self._current_context_id = None
- self._query_context_ids = None
+ self._current_context: Optional[str] = None
+ self._current_context_id: Optional[int] = None
+ self._query_context_ids: Optional[List[int]] = None
- def _locked(method): # pylint: disable=no-self-argument
- """A decorator for methods that should hold self._lock."""
- @functools.wraps(method)
- def _wrapped(self, *args, **kwargs):
- if self._debug.should("lock"):
- self._debug.write(f"Locking {self._lock!r} for {method.__name__}")
- with self._lock:
- if self._debug.should("lock"):
- self._debug.write(f"Locked {self._lock!r} for {method.__name__}")
- # pylint: disable=not-callable
- return method(self, *args, **kwargs)
- return _wrapped
-
- def _choose_filename(self):
+ def _choose_filename(self) -> None:
"""Set self._filename based on inited attributes."""
if self._no_disk:
self._filename = ":memory:"
@@ -248,7 +269,7 @@ def _choose_filename(self):
if suffix:
self._filename += "." + suffix
- def _reset(self):
+ def _reset(self) -> None:
"""Reset our attributes."""
if not self._no_disk:
for db in self._dbs.values():
@@ -258,18 +279,19 @@ def _reset(self):
self._have_used = False
self._current_context_id = None
- def _open_db(self):
+ def _open_db(self) -> None:
"""Open an existing db file, and read its metadata."""
if self._debug.should("dataio"):
self._debug.write(f"Opening data file {self._filename!r}")
self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug)
self._read_db()
- def _read_db(self):
+ def _read_db(self) -> None:
"""Read the metadata from a database so that we are ready to use it."""
with self._dbs[threading.get_ident()] as db:
try:
- schema_version, = db.execute_one("select version from coverage_schema")
+ row = db.execute_one("select version from coverage_schema")
+ assert row is not None
except Exception as exc:
if "no such table: coverage_schema" in str(exc):
self._init_db(db)
@@ -280,6 +302,7 @@ def _read_db(self):
)
) from exc
else:
+ schema_version = row[0]
if schema_version != SCHEMA_VERSION:
raise DataError(
"Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
@@ -287,46 +310,51 @@ def _read_db(self):
)
)
- for row in db.execute("select value from meta where key = 'has_arcs'"):
+ row = db.execute_one("select value from meta where key = 'has_arcs'")
+ if row is not None:
self._has_arcs = bool(int(row[0]))
self._has_lines = not self._has_arcs
- for file_id, path in db.execute("select id, path from file"):
- self._file_map[path] = file_id
+ with db.execute("select id, path from file") as cur:
+ for file_id, path in cur:
+ self._file_map[path] = file_id
- def _init_db(self, db):
+ def _init_db(self, db: SqliteDb) -> None:
"""Write the initial contents of the database."""
if self._debug.should("dataio"):
self._debug.write(f"Initing data file {self._filename!r}")
db.executescript(SCHEMA)
- db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
- db.executemany(
- "insert or ignore into meta (key, value) values (?, ?)",
- [
+ db.execute_void("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
+
+ # When writing metadata, avoid information that will needlessly change
+ # the hash of the data file, unless we're debugging processes.
+ meta_data = [
+ ("version", __version__),
+ ]
+ if self._debug.should("process"):
+ meta_data.extend([
("sys_argv", str(getattr(sys, "argv", None))),
- ("version", __version__),
("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
- ]
- )
+ ])
+ db.executemany_void("insert or ignore into meta (key, value) values (?, ?)", meta_data)
- def _connect(self):
+ def _connect(self) -> SqliteDb:
"""Get the SqliteDb object to use."""
if threading.get_ident() not in self._dbs:
self._open_db()
return self._dbs[threading.get_ident()]
- def __bool__(self):
+ def __bool__(self) -> bool:
if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)):
return False
try:
with self._connect() as con:
- rows = con.execute("select * from file limit 1")
- return bool(list(rows))
+ with con.execute("select * from file limit 1") as cur:
+ return bool(list(cur))
except CoverageException:
return False
- @contract(returns="bytes")
- def dumps(self):
+ def dumps(self) -> bytes:
"""Serialize the current data to a byte string.
The format of the serialized data is not documented. It is only
@@ -349,8 +377,7 @@ def dumps(self):
script = con.dump()
return b"z" + zlib.compress(script.encode("utf-8"))
- @contract(data="bytes")
- def loads(self, data):
+ def loads(self, data: bytes) -> None:
"""Deserialize data from :meth:`dumps`.
Use with a newly-created empty :class:`CoverageData` object. It's
@@ -378,7 +405,7 @@ def loads(self, data):
self._read_db()
self._have_used = True
- def _file_id(self, filename, add=False):
+ def _file_id(self, filename: str, add: bool = False) -> Optional[int]:
"""Get the file id for `filename`.
If filename is not in the database yet, add it if `add` is True.
@@ -393,19 +420,19 @@ def _file_id(self, filename, add=False):
)
return self._file_map.get(filename)
- def _context_id(self, context):
+ def _context_id(self, context: str) -> Optional[int]:
"""Get the id for a context."""
assert context is not None
self._start_using()
with self._connect() as con:
row = con.execute_one("select id from context where context = ?", (context,))
if row is not None:
- return row[0]
+ return cast(int, row[0])
else:
return None
@_locked
- def set_context(self, context):
+ def set_context(self, context: Optional[str]) -> None:
"""Set the current context for future :meth:`add_lines` etc.
`context` is a str, the name of the context to use for the next data
@@ -419,7 +446,7 @@ def set_context(self, context):
self._current_context = context
self._current_context_id = None
- def _set_context_id(self):
+ def _set_context_id(self) -> None:
"""Use the _current_context to set _current_context_id."""
context = self._current_context or ""
context_id = self._context_id(context)
@@ -432,7 +459,7 @@ def _set_context_id(self):
(context,)
)
- def base_filename(self):
+ def base_filename(self) -> str:
"""The base filename for storing data.
.. versionadded:: 5.0
@@ -440,7 +467,7 @@ def base_filename(self):
"""
return self._basename
- def data_filename(self):
+ def data_filename(self) -> str:
"""Where is the data stored?
.. versionadded:: 5.0
@@ -449,7 +476,7 @@ def data_filename(self):
return self._filename
@_locked
- def add_lines(self, line_data):
+ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None:
"""Add measured line data.
`line_data` is a dictionary mapping file names to iterables of ints::
@@ -459,7 +486,7 @@ def add_lines(self, line_data):
"""
if self._debug.should("dataop"):
self._debug.write("Adding lines: %d files, %d lines total" % (
- len(line_data), sum(len(lines) for lines in line_data.values())
+ len(line_data), sum(bool(len(lines)) for lines in line_data.values())
))
self._start_using()
self._choose_lines_or_arcs(lines=True)
@@ -471,18 +498,19 @@ def add_lines(self, line_data):
linemap = nums_to_numbits(linenos)
file_id = self._file_id(filename, add=True)
query = "select numbits from line_bits where file_id = ? and context_id = ?"
- existing = list(con.execute(query, (file_id, self._current_context_id)))
+ with con.execute(query, (file_id, self._current_context_id)) as cur:
+ existing = list(cur)
if existing:
linemap = numbits_union(linemap, existing[0][0])
- con.execute(
+ con.execute_void(
"insert or replace into line_bits " +
" (file_id, context_id, numbits) values (?, ?, ?)",
(file_id, self._current_context_id, linemap),
)
@_locked
- def add_arcs(self, arc_data):
+ def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None:
"""Add measured arc data.
`arc_data` is a dictionary mapping file names to iterables of pairs of
@@ -502,15 +530,17 @@ def add_arcs(self, arc_data):
with self._connect() as con:
self._set_context_id()
for filename, arcs in arc_data.items():
+ if not arcs:
+ continue
file_id = self._file_id(filename, add=True)
data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
- con.executemany(
+ con.executemany_void(
"insert or ignore into arc " +
"(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
data,
)
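
To make the storage model concrete, a sketch of the numbits round trip that add_lines performs above; nums_to_numbits, numbits_union, and numbits_to_nums are the helpers imported from coverage.numbits at the top of the module:

from coverage.numbits import nums_to_numbits, numbits_to_nums, numbits_union

old = nums_to_numbits([1, 2, 5])      # line numbers -> packed bitmap (bytes)
new = nums_to_numbits([2, 9])
merged = numbits_union(old, new)      # the same union add_lines does for existing rows
print(numbits_to_nums(merged))        # [1, 2, 5, 9]
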
- def _choose_lines_or_arcs(self, lines=False, arcs=False):
+ def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None:
"""Force the data file to choose between lines and arcs."""
assert lines or arcs
assert not (lines and arcs)
@@ -526,13 +556,13 @@ def _choose_lines_or_arcs(self, lines=False, arcs=False):
self._has_lines = lines
self._has_arcs = arcs
with self._connect() as con:
- con.execute(
+ con.execute_void(
"insert or ignore into meta (key, value) values (?, ?)",
("has_arcs", str(int(arcs)))
)
@_locked
- def add_file_tracers(self, file_tracers):
+ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None:
"""Add per-file plugin information.
`file_tracers` is { filename: plugin_name, ... }
@@ -545,12 +575,7 @@ def add_file_tracers(self, file_tracers):
self._start_using()
with self._connect() as con:
for filename, plugin_name in file_tracers.items():
- file_id = self._file_id(filename)
- if file_id is None:
- raise DataError(
- f"Can't add file tracer data for unmeasured file '{filename}'"
- )
-
+ file_id = self._file_id(filename, add=True)
existing_plugin = self.file_tracer(filename)
if existing_plugin:
if existing_plugin != plugin_name:
@@ -560,24 +585,24 @@ def add_file_tracers(self, file_tracers):
)
)
elif plugin_name:
- con.execute(
+ con.execute_void(
"insert into tracer (file_id, tracer) values (?, ?)",
(file_id, plugin_name)
)
- def touch_file(self, filename, plugin_name=""):
+ def touch_file(self, filename: str, plugin_name: str = "") -> None:
"""Ensure that `filename` appears in the data, empty if needed.
- `plugin_name` is the name of the plugin responsible for this file. It is used
- to associate the right filereporter, etc.
+ `plugin_name` is the name of the plugin responsible for this file.
+ It is used to associate the right filereporter, etc.
"""
self.touch_files([filename], plugin_name)
- def touch_files(self, filenames, plugin_name=""):
+ def touch_files(self, filenames: Collection[str], plugin_name: Optional[str] = None) -> None:
"""Ensure that `filenames` appear in the data, empty if needed.
- `plugin_name` is the name of the plugin responsible for these files. It is used
- to associate the right filereporter, etc.
+ `plugin_name` is the name of the plugin responsible for these files.
+ It is used to associate the right filereporter, etc.
"""
if self._debug.should("dataop"):
self._debug.write(f"Touching {filenames!r}")
@@ -592,11 +617,37 @@ def touch_files(self, filenames, plugin_name=""):
# Set the tracer for this file
self.add_file_tracers({filename: plugin_name})
- def update(self, other_data, aliases=None):
+ def purge_files(self, filenames: Collection[str]) -> None:
+ """Purge any existing coverage data for the given `filenames`.
+
+ .. versionadded:: 7.2
+
+ """
+ if self._debug.should("dataop"):
+ self._debug.write(f"Purging data for {filenames!r}")
+ self._start_using()
+ with self._connect() as con:
+
+ if self._has_lines:
+ sql = "delete from line_bits where file_id=?"
+ elif self._has_arcs:
+ sql = "delete from arc where file_id=?"
+ else:
+ raise DataError("Can't purge files in an empty CoverageData")
+
+ for filename in filenames:
+ file_id = self._file_id(filename, add=False)
+ if file_id is None:
+ continue
+ con.execute_void(sql, (file_id,))
+
+ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None) -> None:
"""Update this data with data from several other :class:`CoverageData` instances.
If `aliases` is provided, it's a `PathAliases` object that is used to
- re-map paths to match the local machine's.
+ re-map paths to match the local machine's. Note: `aliases` is None
+ only when called directly from the test suite.
+
"""
if self._debug.should("dataop"):
self._debug.write("Updating with data from {!r}".format(
@@ -609,86 +660,87 @@ def update(self, other_data, aliases=None):
aliases = aliases or PathAliases()
- # Force the database we're writing to to exist before we start nesting
- # contexts.
+ # Force the database we're writing to to exist before we start nesting contexts.
self._start_using()
# Collector for all arcs, lines and tracers
other_data.read()
with other_data._connect() as con:
# Get files data.
- cur = con.execute("select path from file")
- files = {path: aliases.map(path) for (path,) in cur}
- cur.close()
+ with con.execute("select path from file") as cur:
+ files = {path: aliases.map(path) for (path,) in cur}
# Get contexts data.
- cur = con.execute("select context from context")
- contexts = [context for (context,) in cur]
- cur.close()
+ with con.execute("select context from context") as cur:
+ contexts = [context for (context,) in cur]
# Get arc data.
- cur = con.execute(
+ with con.execute(
"select file.path, context.context, arc.fromno, arc.tono " +
"from arc " +
"inner join file on file.id = arc.file_id " +
"inner join context on context.id = arc.context_id"
- )
- arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
- cur.close()
+ ) as cur:
+ arcs = [
+ (files[path], context, fromno, tono)
+ for (path, context, fromno, tono) in cur
+ ]
# Get line data.
- cur = con.execute(
+ with con.execute(
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
"inner join context on context.id = line_bits.context_id"
- )
- lines = {(files[path], context): numbits for (path, context, numbits) in cur}
- cur.close()
+ ) as cur:
+ lines: Dict[Tuple[str, str], bytes] = {}
+ for path, context, numbits in cur:
+ key = (files[path], context)
+ if key in lines:
+ numbits = numbits_union(lines[key], numbits)
+ lines[key] = numbits
# Get tracer data.
- cur = con.execute(
+ with con.execute(
"select file.path, tracer " +
"from tracer " +
"inner join file on file.id = tracer.file_id"
- )
- tracers = {files[path]: tracer for (path, tracer) in cur}
- cur.close()
+ ) as cur:
+ tracers = {files[path]: tracer for (path, tracer) in cur}
with self._connect() as con:
+ assert con.con is not None
con.con.isolation_level = "IMMEDIATE"
# Get all tracers in the DB. Files not in the tracers are assumed
# to have an empty string tracer. Since Sqlite does not support
# full outer joins, we have to make two queries to fill the
# dictionary.
- this_tracers = {path: "" for path, in con.execute("select path from file")}
- this_tracers.update({
- aliases.map(path): tracer
- for path, tracer in con.execute(
- "select file.path, tracer from tracer " +
- "inner join file on file.id = tracer.file_id"
- )
- })
+ with con.execute("select path from file") as cur:
+ this_tracers = {path: "" for path, in cur}
+ with con.execute(
+ "select file.path, tracer from tracer " +
+ "inner join file on file.id = tracer.file_id"
+ ) as cur:
+ this_tracers.update({
+ aliases.map(path): tracer
+ for path, tracer in cur
+ })
# Create all file and context rows in the DB.
- con.executemany(
+ con.executemany_void(
"insert or ignore into file (path) values (?)",
((file,) for file in files.values())
)
- file_ids = {
- path: id
- for id, path in con.execute("select id, path from file")
- }
+ with con.execute("select id, path from file") as cur:
+ file_ids = {path: id for id, path in cur}
self._file_map.update(file_ids)
- con.executemany(
+ con.executemany_void(
"insert or ignore into context (context) values (?)",
((context,) for context in contexts)
)
- context_ids = {
- context: id
- for id, context in con.execute("select id, context from context")
- }
+ with con.execute("select id, context from context") as cur:
+ context_ids = {context: id for id, context in cur}
# Prepare tracers and fail, if a conflict is found.
# tracer_paths is used to ensure consistency over the tracer data
@@ -715,24 +767,23 @@ def update(self, other_data, aliases=None):
)
# Get line data.
- cur = con.execute(
+ with con.execute(
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
"inner join context on context.id = line_bits.context_id"
- )
- for path, context, numbits in cur:
- key = (aliases.map(path), context)
- if key in lines:
- numbits = numbits_union(lines[key], numbits)
- lines[key] = numbits
- cur.close()
+ ) as cur:
+ for path, context, numbits in cur:
+ key = (aliases.map(path), context)
+ if key in lines:
+ numbits = numbits_union(lines[key], numbits)
+ lines[key] = numbits
if arcs:
self._choose_lines_or_arcs(arcs=True)
# Write the combined data.
- con.executemany(
+ con.executemany_void(
"insert or ignore into arc " +
"(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
arc_rows
@@ -740,8 +791,8 @@ def update(self, other_data, aliases=None):
if lines:
self._choose_lines_or_arcs(lines=True)
- con.execute("delete from line_bits")
- con.executemany(
+ con.execute_void("delete from line_bits")
+ con.executemany_void(
"insert into line_bits " +
"(file_id, context_id, numbits) values (?, ?, ?)",
[
@@ -749,7 +800,7 @@ def update(self, other_data, aliases=None):
for (file, context), numbits in lines.items()
]
)
- con.executemany(
+ con.executemany_void(
"insert or ignore into tracer (file_id, tracer) values (?, ?)",
((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
)
@@ -759,7 +810,7 @@ def update(self, other_data, aliases=None):
self._reset()
self.read()
- def erase(self, parallel=False):
+ def erase(self, parallel: bool = False) -> None:
"""Erase the data in this object.
If `parallel` is true, then also deletes data files created from the
@@ -774,24 +825,24 @@ def erase(self, parallel=False):
file_be_gone(self._filename)
if parallel:
data_dir, local = os.path.split(self._filename)
- localdot = local + ".*"
- pattern = os.path.join(os.path.abspath(data_dir), localdot)
+ local_abs_path = os.path.join(os.path.abspath(data_dir), local)
+ pattern = glob.escape(local_abs_path) + ".*"
for filename in glob.glob(pattern):
if self._debug.should("dataio"):
self._debug.write(f"Erasing parallel data file {filename!r}")
file_be_gone(filename)
- def read(self):
+ def read(self) -> None:
"""Start using an existing data file."""
if os.path.exists(self._filename):
with self._connect():
self._have_used = True
- def write(self):
+ def write(self) -> None:
"""Ensure the data is written to the data file."""
pass
- def _start_using(self):
+ def _start_using(self) -> None:
"""Call this before using the database at all."""
if self._pid != os.getpid():
# Looks like we forked! Have to start a new data file.
@@ -802,15 +853,20 @@ def _start_using(self):
self.erase()
self._have_used = True
- def has_arcs(self):
+ def has_arcs(self) -> bool:
"""Does the database have arcs (True) or lines (False)."""
return bool(self._has_arcs)
- def measured_files(self):
- """A set of all files that had been measured."""
+ def measured_files(self) -> Set[str]:
+ """A set of all files that have been measured.
+
+ Note that a file may be mentioned as measured even though no lines or
+ arcs for that file are present in the data.
+
+ """
return set(self._file_map)
- def measured_contexts(self):
+ def measured_contexts(self) -> Set[str]:
"""A set of all contexts that have been measured.
.. versionadded:: 5.0
@@ -818,10 +874,11 @@ def measured_contexts(self):
"""
self._start_using()
with self._connect() as con:
- contexts = {row[0] for row in con.execute("select distinct(context) from context")}
+ with con.execute("select distinct(context) from context") as cur:
+ contexts = {row[0] for row in cur}
return contexts
- def file_tracer(self, filename):
+ def file_tracer(self, filename: str) -> Optional[str]:
"""Get the plugin name of the file tracer for a file.
Returns the name of the plugin that handles this file. If the file was
@@ -839,7 +896,7 @@ def file_tracer(self, filename):
return row[0] or ""
return "" # File was measured, but no tracer associated.
- def set_query_context(self, context):
+ def set_query_context(self, context: str) -> None:
"""Set a context for subsequent querying.
The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
@@ -852,10 +909,10 @@ def set_query_context(self, context):
"""
self._start_using()
with self._connect() as con:
- cur = con.execute("select id from context where context = ?", (context,))
- self._query_context_ids = [row[0] for row in cur.fetchall()]
+ with con.execute("select id from context where context = ?", (context,)) as cur:
+ self._query_context_ids = [row[0] for row in cur.fetchall()]
- def set_query_contexts(self, contexts):
+ def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None:
"""Set a number of contexts for subsequent querying.
The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
@@ -871,12 +928,12 @@ def set_query_contexts(self, contexts):
if contexts:
with self._connect() as con:
context_clause = " or ".join(["context regexp ?"] * len(contexts))
- cur = con.execute("select id from context where " + context_clause, contexts)
- self._query_context_ids = [row[0] for row in cur.fetchall()]
+ with con.execute("select id from context where " + context_clause, contexts) as cur:
+ self._query_context_ids = [row[0] for row in cur.fetchall()]
else:
self._query_context_ids = None
- def lines(self, filename):
+ def lines(self, filename: str) -> Optional[List[TLineNo]]:
"""Get the list of lines executed for a source file.
If the file was not measured, returns None. A file might be measured,
@@ -904,13 +961,14 @@ def lines(self, filename):
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and context_id in (" + ids_array + ")"
data += self._query_context_ids
- bitmaps = list(con.execute(query, data))
+ with con.execute(query, data) as cur:
+ bitmaps = list(cur)
nums = set()
for row in bitmaps:
nums.update(numbits_to_nums(row[0]))
return list(nums)
- def arcs(self, filename):
+ def arcs(self, filename: str) -> Optional[List[TArc]]:
"""Get the list of arcs executed for a file.
If the file was not measured, returns None. A file might be measured,
@@ -939,10 +997,10 @@ def arcs(self, filename):
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and context_id in (" + ids_array + ")"
data += self._query_context_ids
- arcs = con.execute(query, data)
- return list(arcs)
+ with con.execute(query, data) as cur:
+ return list(cur)
- def contexts_by_lineno(self, filename):
+ def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]:
"""Get the contexts for each line in a file.
Returns:
@@ -969,11 +1027,12 @@ def contexts_by_lineno(self, filename):
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and arc.context_id in (" + ids_array + ")"
data += self._query_context_ids
- for fromno, tono, context in con.execute(query, data):
- if fromno > 0:
- lineno_contexts_map[fromno].add(context)
- if tono > 0:
- lineno_contexts_map[tono].add(context)
+ with con.execute(query, data) as cur:
+ for fromno, tono, context in cur:
+ if fromno > 0:
+ lineno_contexts_map[fromno].add(context)
+ if tono > 0:
+ lineno_contexts_map[tono].add(context)
else:
query = (
"select l.numbits, c.context from line_bits l, context c " +
@@ -985,33 +1044,35 @@ def contexts_by_lineno(self, filename):
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and l.context_id in (" + ids_array + ")"
data += self._query_context_ids
- for numbits, context in con.execute(query, data):
- for lineno in numbits_to_nums(numbits):
- lineno_contexts_map[lineno].add(context)
+ with con.execute(query, data) as cur:
+ for numbits, context in cur:
+ for lineno in numbits_to_nums(numbits):
+ lineno_contexts_map[lineno].add(context)
return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()}
@classmethod
- def sys_info(cls):
+ def sys_info(cls) -> List[Tuple[str, Any]]:
"""Our information for `Coverage.sys_info`.
Returns a list of (key, value) pairs.
"""
with SqliteDb(":memory:", debug=NoDebugging()) as db:
- temp_store = [row[0] for row in db.execute("pragma temp_store")]
- copts = [row[0] for row in db.execute("pragma compile_options")]
+ with db.execute("pragma temp_store") as cur:
+ temp_store = [row[0] for row in cur]
+ with db.execute("pragma compile_options") as cur:
+ copts = [row[0] for row in cur]
copts = textwrap.wrap(", ".join(copts), width=75)
return [
- ("sqlite3_version", sqlite3.version),
("sqlite3_sqlite_version", sqlite3.sqlite_version),
("sqlite3_temp_store", temp_store),
("sqlite3_compile_options", copts),
]
-def filename_suffix(suffix):
+def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]:
"""Compute a filename suffix for a data file.
If `suffix` is a string or None, simply return it. If `suffix` is True,
@@ -1028,10 +1089,12 @@ def filename_suffix(suffix):
# if the process forks.
dice = random.Random(os.urandom(8)).randint(0, 999999)
suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
+ elif suffix is False:
+ suffix = None
return suffix
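
A quick sketch of what filename_suffix now returns for each kind of input; the hostname, pid, and random digits shown are illustrative, not real output:

from coverage.sqldata import filename_suffix

print(filename_suffix("ci"))     # "ci"   -- strings pass through unchanged
print(filename_suffix(None))     # None   -- no suffix
print(filename_suffix(False))    # None   -- with this change, False also means no suffix
print(filename_suffix(True))     # e.g. "host.12345.678901" -- hostname.pid.random
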
-class SqliteDb(SimpleReprMixin):
+class SqliteDb(AutoReprMixin):
"""A simple abstraction over a SQLite database.
Use as a context manager, then you can use it like a
@@ -1041,13 +1104,13 @@ class SqliteDb(SimpleReprMixin):
db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,))
"""
- def __init__(self, filename, debug):
+ def __init__(self, filename: str, debug: TDebugCtl) -> None:
self.debug = debug
self.filename = filename
self.nest = 0
- self.con = None
+ self.con: Optional[sqlite3.Connection] = None
- def _connect(self):
+ def _connect(self) -> None:
"""Connect to the db and do universal initialization."""
if self.con is not None:
return
@@ -1069,27 +1132,29 @@ def _connect(self):
# This pragma makes writing faster. It disables rollbacks, but we never need them.
# PyPy needs the .close() calls here, or sqlite gets twisted up:
# https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
- self.execute("pragma journal_mode=off").close()
+ self.execute_void("pragma journal_mode=off")
# This pragma makes writing faster.
- self.execute("pragma synchronous=off").close()
+ self.execute_void("pragma synchronous=off")
- def close(self):
+ def close(self) -> None:
"""If needed, close the connection."""
if self.con is not None and self.filename != ":memory:":
self.con.close()
self.con = None
- def __enter__(self):
+ def __enter__(self) -> SqliteDb:
if self.nest == 0:
self._connect()
+ assert self.con is not None
self.con.__enter__()
self.nest += 1
return self
- def __exit__(self, exc_type, exc_value, traceback):
+ def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def]
self.nest -= 1
if self.nest == 0:
try:
+ assert self.con is not None
self.con.__exit__(exc_type, exc_value, traceback)
self.close()
except Exception as exc:
@@ -1097,19 +1162,20 @@ def __exit__(self, exc_type, exc_value, traceback):
self.debug.write(f"EXCEPTION from __exit__: {exc}")
raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc
- def execute(self, sql, parameters=()):
+ def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug.should("sql"):
tail = f" with {parameters!r}" if parameters else ""
self.debug.write(f"Executing {sql!r}{tail}")
try:
+ assert self.con is not None
try:
- return self.con.execute(sql, parameters)
+ return self.con.execute(sql, parameters) # type: ignore[arg-type]
except Exception:
# In some cases, an error might happen that isn't really an
# error. Try again immediately.
# https://github.com/nedbat/coveragepy/issues/1010
- return self.con.execute(sql, parameters)
+ return self.con.execute(sql, parameters) # type: ignore[arg-type]
except sqlite3.Error as exc:
msg = str(exc)
try:
@@ -1128,15 +1194,36 @@ def execute(self, sql, parameters=()):
self.debug.write(f"EXCEPTION from execute: {msg}")
raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc
- def execute_for_rowid(self, sql, parameters=()):
+ @contextlib.contextmanager
+ def execute(
+ self,
+ sql: str,
+ parameters: Iterable[Any] = (),
+ ) -> Iterator[sqlite3.Cursor]:
+ """Context managed :meth:`python:sqlite3.Connection.execute`.
+
+ Use with a ``with`` statement to auto-close the returned cursor.
+ """
+ cur = self._execute(sql, parameters)
+ try:
+ yield cur
+ finally:
+ cur.close()
+
+ def execute_void(self, sql: str, parameters: Iterable[Any] = ()) -> None:
+ """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor."""
+ self._execute(sql, parameters).close()
+
+ def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
"""Like execute, but returns the lastrowid."""
- con = self.execute(sql, parameters)
- rowid = con.lastrowid
+ with self.execute(sql, parameters) as cur:
+ assert cur.lastrowid is not None
+ rowid: int = cur.lastrowid
if self.debug.should("sqldata"):
self.debug.write(f"Row id result: {rowid!r}")
return rowid
- def execute_one(self, sql, parameters=()):
+ def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tuple[Any, ...]]:
"""Execute a statement and return the one row that results.
This is like execute(sql, parameters).fetchone(), except it is
@@ -1145,23 +1232,24 @@ def execute_one(self, sql, parameters=()):
Returns a row, or None if there were no rows.
"""
- rows = list(self.execute(sql, parameters))
+ with self.execute(sql, parameters) as cur:
+ rows = list(cur)
if len(rows) == 0:
return None
elif len(rows) == 1:
- return rows[0]
+ return cast(Tuple[Any, ...], rows[0])
else:
raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")
- def executemany(self, sql, data):
+ def _executemany(self, sql: str, data: List[Any]) -> sqlite3.Cursor:
"""Same as :meth:`python:sqlite3.Connection.executemany`."""
if self.debug.should("sql"):
- data = list(data)
final = ":" if self.debug.should("sqldata") else ""
self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}")
if self.debug.should("sqldata"):
for i, row in enumerate(data):
self.debug.write(f"{i:4d}: {row!r}")
+ assert self.con is not None
try:
return self.con.executemany(sql, data)
except Exception: # pragma: cant happen
@@ -1170,14 +1258,22 @@ def executemany(self, sql, data):
# https://github.com/nedbat/coveragepy/issues/1010
return self.con.executemany(sql, data)
- def executescript(self, script):
+ def executemany_void(self, sql: str, data: Iterable[Any]) -> None:
+ """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
+ data = list(data)
+ if data:
+ self._executemany(sql, data).close()
+
+ def executescript(self, script: str) -> None:
"""Same as :meth:`python:sqlite3.Connection.executescript`."""
if self.debug.should("sql"):
self.debug.write("Executing script with {} chars: {}".format(
len(script), clipped_repr(script, 100),
))
- self.con.executescript(script)
+ assert self.con is not None
+ self.con.executescript(script).close()
- def dump(self):
+ def dump(self) -> str:
"""Return a multi-line string, the SQL dump of the database."""
+ assert self.con is not None
return "\n".join(self.con.iterdump())
diff --git a/coverage/summary.py b/coverage/summary.py
deleted file mode 100644
index 861fbc536..000000000
--- a/coverage/summary.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Summary reporting"""
-
-import sys
-
-from coverage.exceptions import ConfigError, NoDataError
-from coverage.misc import human_sorted_items
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-
-
-class SummaryReporter:
- """A reporter for writing the summary report."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.branches = coverage.get_data().has_arcs()
- self.outfile = None
- self.fr_analysis = []
- self.skipped_count = 0
- self.empty_count = 0
- self.total = Numbers(precision=self.config.precision)
- self.fmt_err = "%s %s: %s"
-
- def writeout(self, line):
- """Write a line to the output, adding a newline."""
- self.outfile.write(line.rstrip())
- self.outfile.write("\n")
-
- def report(self, morfs, outfile=None):
- """Writes a report summarizing coverage statistics per module.
-
- `outfile` is a file object to write the summary to. It must be opened
- for native strings (bytes on Python 2, Unicode on Python 3).
-
- """
- self.outfile = outfile or sys.stdout
-
- self.coverage.get_data().set_query_contexts(self.config.report_contexts)
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.report_one_file(fr, analysis)
-
- # Prepare the formatting strings, header, and column sorting.
- max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5])
- fmt_name = "%%- %ds " % max_name
- fmt_skip_covered = "\n%s file%s skipped due to complete coverage."
- fmt_skip_empty = "\n%s empty file%s skipped."
-
- header = (fmt_name % "Name") + " Stmts Miss"
- fmt_coverage = fmt_name + "%6d %6d"
- if self.branches:
- header += " Branch BrPart"
- fmt_coverage += " %6d %6d"
- width100 = Numbers(precision=self.config.precision).pc_str_width()
- header += "%*s" % (width100+4, "Cover")
- fmt_coverage += "%%%ds%%%%" % (width100+3,)
- if self.config.show_missing:
- header += " Missing"
- fmt_coverage += " %s"
- rule = "-" * len(header)
-
- column_order = dict(name=0, stmts=1, miss=2, cover=-1)
- if self.branches:
- column_order.update(dict(branch=3, brpart=4))
-
- # Write the header
- self.writeout(header)
- self.writeout(rule)
-
- # `lines` is a list of pairs, (line text, line values). The line text
- # is a string that will be printed, and line values is a tuple of
- # sortable values.
- lines = []
-
- for (fr, analysis) in self.fr_analysis:
- nums = analysis.numbers
-
- args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
- if self.branches:
- args += (nums.n_branches, nums.n_partial_branches)
- args += (nums.pc_covered_str,)
- if self.config.show_missing:
- args += (analysis.missing_formatted(branches=True),)
- text = fmt_coverage % args
- # Add numeric percent coverage so that sorting makes sense.
- args += (nums.pc_covered,)
- lines.append((text, args))
-
- # Sort the lines and write them out.
- sort_option = (self.config.sort or "name").lower()
- reverse = False
- if sort_option[0] == '-':
- reverse = True
- sort_option = sort_option[1:]
- elif sort_option[0] == '+':
- sort_option = sort_option[1:]
-
- if sort_option == "name":
- lines = human_sorted_items(lines, reverse=reverse)
- else:
- position = column_order.get(sort_option)
- if position is None:
- raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
- lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse)
-
- for line in lines:
- self.writeout(line[0])
-
- # Write a TOTAL line if we had at least one file.
- if self.total.n_files > 0:
- self.writeout(rule)
- args = ("TOTAL", self.total.n_statements, self.total.n_missing)
- if self.branches:
- args += (self.total.n_branches, self.total.n_partial_branches)
- args += (self.total.pc_covered_str,)
- if self.config.show_missing:
- args += ("",)
- self.writeout(fmt_coverage % args)
-
- # Write other final lines.
- if not self.total.n_files and not self.skipped_count:
- raise NoDataError("No data to report.")
-
- if self.config.skip_covered and self.skipped_count:
- self.writeout(
- fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '')
- )
- if self.config.skip_empty and self.empty_count:
- self.writeout(
- fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '')
- )
-
- return self.total.n_statements and self.total.pc_covered
-
- def report_one_file(self, fr, analysis):
- """Report on just one file, the callback from report()."""
- nums = analysis.numbers
- self.total += nums
-
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if self.config.skip_covered and no_missing_lines and no_missing_branches:
- # Don't report on 100% files.
- self.skipped_count += 1
- elif self.config.skip_empty and nums.n_statements == 0:
- # Don't report on empty files.
- self.empty_count += 1
- else:
- self.fr_analysis.append((fr, analysis))
diff --git a/coverage/templite.py b/coverage/templite.py
index ab3cf1cf4..11ea847be 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -10,8 +10,14 @@
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
+from __future__ import annotations
+
import re
+from typing import (
+ Any, Callable, Dict, List, NoReturn, Optional, Set, Union, cast,
+)
+
class TempliteSyntaxError(ValueError):
"""Raised when a template has a syntax error."""
@@ -26,14 +32,14 @@ class TempliteValueError(ValueError):
class CodeBuilder:
"""Build source code conveniently."""
- def __init__(self, indent=0):
- self.code = []
+ def __init__(self, indent: int = 0) -> None:
+ self.code: List[Union[str, CodeBuilder]] = []
self.indent_level = indent
- def __str__(self):
+ def __str__(self) -> str:
return "".join(str(c) for c in self.code)
- def add_line(self, line):
+ def add_line(self, line: str) -> None:
"""Add a line of source to the code.
Indentation and newline will be added for you, don't provide them.
@@ -41,7 +47,7 @@ def add_line(self, line):
"""
self.code.extend([" " * self.indent_level, line, "\n"])
- def add_section(self):
+ def add_section(self) -> CodeBuilder:
"""Add a section, a sub-CodeBuilder."""
section = CodeBuilder(self.indent_level)
self.code.append(section)
@@ -49,22 +55,22 @@ def add_section(self):
INDENT_STEP = 4 # PEP8 says so!
- def indent(self):
+ def indent(self) -> None:
"""Increase the current indent for following lines."""
self.indent_level += self.INDENT_STEP
- def dedent(self):
+ def dedent(self) -> None:
"""Decrease the current indent for following lines."""
self.indent_level -= self.INDENT_STEP
- def get_globals(self):
+ def get_globals(self) -> Dict[str, Any]:
"""Execute the code, and return a dict of globals it defines."""
# A check that the caller really finished all the blocks they started.
assert self.indent_level == 0
# Get the Python source as a single string.
python_source = str(self)
# Execute the source, defining globals, and return them.
- global_namespace = {}
+ global_namespace: Dict[str, Any] = {}
exec(python_source, global_namespace)
return global_namespace
@@ -92,7 +98,7 @@ class Templite:
and joined. Be careful, this could join words together!
Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
- which will collapse the whitespace following the tag.
+ which will collapse the white space following the tag.
Construct a Templite with the template text, then use `render` against a
dictionary context to create a finished string::
@@ -103,15 +109,15 @@ class Templite:
You are interested in {{topic}}.
{% endif %}
''',
- {'upper': str.upper},
+ {"upper": str.upper},
)
text = templite.render({
- 'name': "Ned",
- 'topics': ['Python', 'Geometry', 'Juggling'],
+ "name": "Ned",
+ "topics": ["Python", "Geometry", "Juggling"],
})
"""
- def __init__(self, text, *contexts):
+ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
"""Construct a Templite with the given `text`.
`contexts` are dictionaries of values to use for future renderings.
@@ -122,8 +128,8 @@ def __init__(self, text, *contexts):
for context in contexts:
self.context.update(context)
- self.all_vars = set()
- self.loop_vars = set()
+ self.all_vars: Set[str] = set()
+ self.loop_vars: Set[str] = set()
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
@@ -137,9 +143,9 @@ def __init__(self, text, *contexts):
code.add_line("extend_result = result.extend")
code.add_line("to_str = str")
- buffered = []
+ buffered: List[str] = []
- def flush_output():
+ def flush_output() -> None:
"""Force `buffered` to the code builder."""
if len(buffered) == 1:
code.add_line("append_result(%s)" % buffered[0])
@@ -155,37 +161,37 @@ def flush_output():
squash = in_joined = False
for token in tokens:
- if token.startswith('{'):
+ if token.startswith("{"):
start, end = 2, -2
- squash = (token[-3] == '-')
+ squash = (token[-3] == "-")
if squash:
end = -3
- if token.startswith('{#'):
+ if token.startswith("{#"):
# Comment: ignore it and move on.
continue
- elif token.startswith('{{'):
+ elif token.startswith("{{"):
# An expression to evaluate.
expr = self._expr_code(token[start:end].strip())
buffered.append("to_str(%s)" % expr)
else:
- # token.startswith('{%')
+ # token.startswith("{%")
# Action tag: split into words and parse further.
flush_output()
words = token[start:end].strip().split()
- if words[0] == 'if':
+ if words[0] == "if":
# An if statement: evaluate the expression to determine if.
if len(words) != 2:
self._syntax_error("Don't understand if", token)
- ops_stack.append('if')
+ ops_stack.append("if")
code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
- elif words[0] == 'for':
+ elif words[0] == "for":
# A loop: iterate over expression result.
- if len(words) != 4 or words[2] != 'in':
+ if len(words) != 4 or words[2] != "in":
self._syntax_error("Don't understand for", token)
- ops_stack.append('for')
+ ops_stack.append("for")
self._variable(words[1], self.loop_vars)
code.add_line(
"for c_{} in {}:".format(
@@ -194,10 +200,10 @@ def flush_output():
)
)
code.indent()
- elif words[0] == 'joined':
- ops_stack.append('joined')
+ elif words[0] == "joined":
+ ops_stack.append("joined")
in_joined = True
- elif words[0].startswith('end'):
+ elif words[0].startswith("end"):
# Endsomething. Pop the ops stack.
if len(words) != 1:
self._syntax_error("Don't understand end", token)
@@ -207,7 +213,7 @@ def flush_output():
start_what = ops_stack.pop()
if start_what != end_what:
self._syntax_error("Mismatched end tag", end_what)
- if end_what == 'joined':
+ if end_what == "joined":
in_joined = False
else:
code.dedent()
@@ -230,11 +236,17 @@ def flush_output():
for var_name in self.all_vars - self.loop_vars:
vars_code.add_line(f"c_{var_name} = context[{var_name!r}]")
- code.add_line('return "".join(result)')
+ code.add_line("return ''.join(result)")
code.dedent()
- self._render_function = code.get_globals()['render_function']
+ self._render_function = cast(
+ Callable[
+ [Dict[str, Any], Callable[..., Any]],
+ str
+ ],
+ code.get_globals()["render_function"],
+ )
- def _expr_code(self, expr):
+ def _expr_code(self, expr: str) -> str:
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
@@ -252,11 +264,11 @@ def _expr_code(self, expr):
code = "c_%s" % expr
return code
- def _syntax_error(self, msg, thing):
+ def _syntax_error(self, msg: str, thing: Any) -> NoReturn:
"""Raise a syntax error using `msg`, and showing `thing`."""
raise TempliteSyntaxError(f"{msg}: {thing!r}")
- def _variable(self, name, vars_set):
+ def _variable(self, name: str, vars_set: Set[str]) -> None:
"""Track that `name` is used as a variable.
Adds the name to `vars_set`, a set of variable names.
@@ -268,7 +280,7 @@ def _variable(self, name, vars_set):
self._syntax_error("Not a valid name", name)
vars_set.add(name)
- def render(self, context=None):
+ def render(self, context: Optional[Dict[str, Any]] = None) -> str:
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
@@ -280,7 +292,7 @@ def render(self, context=None):
render_context.update(context)
return self._render_function(render_context, self._do_dots)
- def _do_dots(self, value, *dots):
+ def _do_dots(self, value: Any, *dots: str) -> Any:
"""Evaluate dotted expressions at run-time."""
for dot in dots:
try:
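The constructor docstring above remains the reference for the template syntax; here is one more small, hypothetical example exercising the pipe filters and dotted access that _expr_code() and _do_dots() handle (the names and values are made up):

    from coverage.templite import Templite

    template = Templite(
        "{% for p in people %}{{ p.name|upper }} is {{ p.age }}\n{% endfor %}",
        {"upper": str.upper},
    )
    text = template.render({
        "people": [
            {"name": "Ned", "age": 60},
            {"name": "Max", "age": 11},
        ],
    })
    # text == "NED is 60\nMAX is 11\n"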
diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py
index 148c34f89..139cb2c1b 100644
--- a/coverage/tomlconfig.py
+++ b/coverage/tomlconfig.py
@@ -3,25 +3,25 @@
"""TOML configuration support for coverage.py"""
-import configparser
+from __future__ import annotations
+
import os
import re
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar
+
from coverage import env
from coverage.exceptions import ConfigError
from coverage.misc import import_third_party, substitute_variables
+from coverage.types import TConfigSectionOut, TConfigValueOut
if env.PYVERSION >= (3, 11, 0, "alpha", 7):
import tomllib # pylint: disable=import-error
+ has_tomllib = True
else:
# TOML support on Python 3.10 and below is an install-time extra option.
- # (Import typing is here because import_third_party will unload any module
- # that wasn't already imported. tomli imports typing, and if we unload it,
- # later it's imported again, and on Python 3.6, this causes infinite
- # recursion.)
- import typing # pylint: disable=unused-import
- tomllib = import_third_party("tomli")
+ tomllib, has_tomllib = import_third_party("tomli")
class TomlDecodeError(Exception):
@@ -29,6 +29,8 @@ class TomlDecodeError(Exception):
pass
+TWant = TypeVar("TWant")
+
class TomlConfigParser:
"""TOML file reading with the interface of HandyConfigParser."""
@@ -36,11 +38,11 @@ class TomlConfigParser:
# need for docstrings.
# pylint: disable=missing-function-docstring
- def __init__(self, our_file):
+ def __init__(self, our_file: bool) -> None:
self.our_file = our_file
- self.data = None
+ self.data: Dict[str, Any] = {}
- def read(self, filenames):
+ def read(self, filenames: Iterable[str]) -> List[str]:
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, (bytes, str, os.PathLike))
@@ -51,22 +53,21 @@ def read(self, filenames):
toml_text = fp.read()
except OSError:
return []
- if tomllib is not None:
- toml_text = substitute_variables(toml_text, os.environ)
+ if has_tomllib:
try:
self.data = tomllib.loads(toml_text)
except tomllib.TOMLDecodeError as err:
raise TomlDecodeError(str(err)) from err
return [filename]
else:
- has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
+ has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE)
if self.our_file or has_toml:
# Looks like they meant to read TOML, but we can't read it.
msg = "Can't read {!r} without TOML support. Install with [toml] extra"
raise ConfigError(msg.format(filename))
return []
- def _get_section(self, section):
+ def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSectionOut]]:
"""Get a section from the data.
Arguments:
@@ -79,8 +80,6 @@ def _get_section(self, section):
"""
prefixes = ["tool.coverage."]
- if self.our_file:
- prefixes.append("")
for prefix in prefixes:
real_section = prefix + section
parts = real_section.split(".")
@@ -95,60 +94,101 @@ def _get_section(self, section):
return None, None
return real_section, data
- def _get(self, section, option):
+ def _get(self, section: str, option: str) -> Tuple[str, TConfigValueOut]:
"""Like .get, but returns the real section name and the value."""
name, data = self._get_section(section)
if data is None:
- raise configparser.NoSectionError(section)
+ raise ConfigError(f"No section: {section!r}")
+ assert name is not None
try:
- return name, data[option]
- except KeyError as exc:
- raise configparser.NoOptionError(option, name) from exc
+ value = data[option]
+ except KeyError:
+ raise ConfigError(f"No option {option!r} in section: {name!r}") from None
+ return name, value
- def has_option(self, section, option):
+ def _get_single(self, section: str, option: str) -> Any:
+ """Get a single-valued option.
+
+ Performs environment substitution if the value is a string. Other types
+ will be converted later as needed.
+ """
+ name, value = self._get(section, option)
+ if isinstance(value, str):
+ value = substitute_variables(value, os.environ)
+ return name, value
+
+ def has_option(self, section: str, option: str) -> bool:
_, data = self._get_section(section)
if data is None:
return False
return option in data
- def has_section(self, section):
+ def real_section(self, section: str) -> Optional[str]:
name, _ = self._get_section(section)
return name
- def options(self, section):
+ def has_section(self, section: str) -> bool:
+ name, _ = self._get_section(section)
+ return bool(name)
+
+ def options(self, section: str) -> List[str]:
_, data = self._get_section(section)
if data is None:
- raise configparser.NoSectionError(section)
+ raise ConfigError(f"No section: {section!r}")
return list(data.keys())
- def get_section(self, section):
+ def get_section(self, section: str) -> TConfigSectionOut:
_, data = self._get_section(section)
- return data
+ return data or {}
- def get(self, section, option):
- _, value = self._get(section, option)
+ def get(self, section: str, option: str) -> Any:
+ _, value = self._get_single(section, option)
return value
- def _check_type(self, section, option, value, type_, type_desc):
- if not isinstance(value, type_):
- raise ValueError(
- 'Option {!r} in section {!r} is not {}: {!r}'
- .format(option, section, type_desc, value)
- )
-
- def getboolean(self, section, option):
- name, value = self._get(section, option)
- self._check_type(name, option, value, bool, "a boolean")
- return value
-
- def getlist(self, section, option):
+ def _check_type(
+ self,
+ section: str,
+ option: str,
+ value: Any,
+ type_: Type[TWant],
+ converter: Optional[Callable[[Any], TWant]],
+ type_desc: str,
+ ) -> TWant:
+ """Check that `value` has the type we want, converting if needed.
+
+ Returns the resulting value of the desired type.
+ """
+ if isinstance(value, type_):
+ return value
+ if isinstance(value, str) and converter is not None:
+ try:
+ return converter(value)
+ except Exception as e:
+ raise ValueError(
+ f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}"
+ ) from e
+ raise ValueError(
+ f"Option [{section}]{option} is not {type_desc}: {value!r}"
+ )
+
+ def getboolean(self, section: str, option: str) -> bool:
+ name, value = self._get_single(section, option)
+ bool_strings = {"true": True, "false": False}
+ return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean")
+
+ def _get_list(self, section: str, option: str) -> Tuple[str, List[str]]:
+ """Get a list of strings, substituting environment variables in the elements."""
name, values = self._get(section, option)
- self._check_type(name, option, values, list, "a list")
+ values = self._check_type(name, option, values, list, None, "a list")
+ values = [substitute_variables(value, os.environ) for value in values]
+ return name, values
+
+ def getlist(self, section: str, option: str) -> List[str]:
+ _, values = self._get_list(section, option)
return values
- def getregexlist(self, section, option):
- name, values = self._get(section, option)
- self._check_type(name, option, values, list, "a list")
+ def getregexlist(self, section: str, option: str) -> List[str]:
+ name, values = self._get_list(section, option)
for value in values:
value = value.strip()
try:
@@ -157,14 +197,12 @@ def getregexlist(self, section, option):
raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e
return values
- def getint(self, section, option):
- name, value = self._get(section, option)
- self._check_type(name, option, value, int, "an integer")
- return value
+ def getint(self, section: str, option: str) -> int:
+ name, value = self._get_single(section, option)
+ return self._check_type(name, option, value, int, int, "an integer")
- def getfloat(self, section, option):
- name, value = self._get(section, option)
+ def getfloat(self, section: str, option: str) -> float:
+ name, value = self._get_single(section, option)
if isinstance(value, int):
value = float(value)
- self._check_type(name, option, value, float, "a float")
- return value
+ return self._check_type(name, option, value, float, float, "a float")
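The net effect of _get_single() and the converter argument to _check_type() is that quoted TOML values can carry environment variables and still come back as the right type. A self-contained sketch (the file is written here only to make the example runnable; the option and variable are illustrative):

    import os

    from coverage.tomlconfig import TomlConfigParser

    os.environ["COV_PRECISION"] = "2"
    with open("pyproject.toml", "w") as f:
        f.write('[tool.coverage.report]\nprecision = "${COV_PRECISION}"\n')

    parser = TomlConfigParser(our_file=False)
    parser.read("pyproject.toml")
    assert parser.getint("report", "precision") == 2   # substituted, then converted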
diff --git a/coverage/tracer.pyi b/coverage/tracer.pyi
new file mode 100644
index 000000000..d1281767b
--- /dev/null
+++ b/coverage/tracer.pyi
@@ -0,0 +1,35 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+from typing import Any, Dict
+
+from coverage.types import TFileDisposition, TTraceData, TTraceFn, TTracer
+
+class CFileDisposition(TFileDisposition):
+ canonical_filename: Any
+ file_tracer: Any
+ has_dynamic_filename: Any
+ original_filename: Any
+ reason: Any
+ source_filename: Any
+ trace: Any
+ def __init__(self) -> None: ...
+
+class CTracer(TTracer):
+ check_include: Any
+ concur_id_func: Any
+ data: TTraceData
+ disable_plugin: Any
+ file_tracers: Any
+ should_start_context: Any
+ should_trace: Any
+ should_trace_cache: Any
+ switch_context: Any
+ trace_arcs: Any
+ warn: Any
+ def __init__(self) -> None: ...
+ def activity(self) -> bool: ...
+ def get_stats(self) -> Dict[str, int]: ...
+ def reset_activity(self) -> Any: ...
+ def start(self) -> TTraceFn: ...
+ def stop(self) -> None: ...
diff --git a/coverage/types.py b/coverage/types.py
new file mode 100644
index 000000000..828ab20bb
--- /dev/null
+++ b/coverage/types.py
@@ -0,0 +1,197 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""
+Types for use throughout coverage.py.
+"""
+
+from __future__ import annotations
+
+import os
+import pathlib
+
+from types import FrameType, ModuleType
+from typing import (
+ Any, Callable, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Type, Union,
+ TYPE_CHECKING,
+)
+
+if TYPE_CHECKING:
+ # Protocol is new in 3.8. PYVERSIONS
+ from typing import Protocol
+
+ from coverage.plugin import FileTracer
+
+else:
+ class Protocol: # pylint: disable=missing-class-docstring
+ pass
+
+## File paths
+
+# For arguments that are file paths:
+if TYPE_CHECKING:
+ FilePath = Union[str, os.PathLike[str]]
+else:
+ # PathLike < python3.9 doesn't support subscription
+ FilePath = Union[str, os.PathLike]
+# For testing FilePath arguments
+FilePathClasses = [str, pathlib.Path]
+FilePathType = Union[Type[str], Type[pathlib.Path]]
+
+## Python tracing
+
+class TTraceFn(Protocol):
+ """A Python trace function."""
+ def __call__(
+ self,
+ frame: FrameType,
+ event: str,
+ arg: Any,
+ lineno: Optional[TLineNo] = None # Our own twist, see collector.py
+ ) -> Optional[TTraceFn]:
+ ...
+
+## Coverage.py tracing
+
+# Line numbers are pervasive enough that they deserve their own type.
+TLineNo = int
+
+TArc = Tuple[TLineNo, TLineNo]
+
+class TFileDisposition(Protocol):
+ """A simple value type for recording what to do with a file."""
+
+ original_filename: str
+ canonical_filename: str
+ source_filename: Optional[str]
+ trace: bool
+ reason: str
+ file_tracer: Optional[FileTracer]
+ has_dynamic_filename: bool
+
+
+# When collecting data, we use a dictionary with a few possible shapes. The
+# keys are always file names.
+# - If measuring line coverage, the values are sets of line numbers.
+# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs
+# of line numbers).
+# - If measuring arcs in the C tracer, the values are sets of packed arcs (two
+# line numbers combined into one integer).
+
+TTraceFileData = Union[Set[TLineNo], Set[TArc], Set[int]]
+
+TTraceData = Dict[str, TTraceFileData]
+
+class TTracer(Protocol):
+ """Either CTracer or PyTracer."""
+
+ data: TTraceData
+ trace_arcs: bool
+ should_trace: Callable[[str, FrameType], TFileDisposition]
+ should_trace_cache: Mapping[str, Optional[TFileDisposition]]
+ should_start_context: Optional[Callable[[FrameType], Optional[str]]]
+ switch_context: Optional[Callable[[Optional[str]], None]]
+ warn: TWarnFn
+
+ def __init__(self) -> None:
+ ...
+
+ def start(self) -> TTraceFn:
+ """Start this tracer, returning a trace function."""
+
+ def stop(self) -> None:
+ """Stop this tracer."""
+
+ def activity(self) -> bool:
+ """Has there been any activity?"""
+
+ def reset_activity(self) -> None:
+ """Reset the activity() flag."""
+
+ def get_stats(self) -> Optional[Dict[str, int]]:
+ """Return a dictionary of statistics, or None."""
+
+## Coverage
+
+# Many places use kwargs as Coverage kwargs.
+TCovKwargs = Any
+
+
+## Configuration
+
+# One value read from a config file.
+TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str]]]
+TConfigValueOut = Optional[Union[bool, int, float, str, List[str]]]
+# An entire config section, mapping option names to values.
+TConfigSectionIn = Mapping[str, TConfigValueIn]
+TConfigSectionOut = Mapping[str, TConfigValueOut]
+
+class TConfigurable(Protocol):
+ """Something that can proxy to the coverage configuration settings."""
+
+ def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
+ """Get an option from the configuration.
+
+ `option_name` is a colon-separated string indicating the section and
+ option name. For example, the ``branch`` option in the ``[run]``
+ section of the config file would be indicated with `"run:branch"`.
+
+ Returns the value of the option.
+
+ """
+
+ def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None:
+ """Set an option in the configuration.
+
+ `option_name` is a colon-separated string indicating the section and
+ option name. For example, the ``branch`` option in the ``[run]``
+ section of the config file would be indicated with `"run:branch"`.
+
+ `value` is the new value for the option.
+
+ """
+
+class TPluginConfig(Protocol):
+ """Something that can provide options to a plugin."""
+
+ def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
+ """Get the options for a plugin."""
+
+
+## Parsing
+
+TMorf = Union[ModuleType, str]
+
+TSourceTokenLines = Iterable[List[Tuple[str, str]]]
+
+## Plugins
+
+class TPlugin(Protocol):
+ """What all plugins have in common."""
+ _coverage_plugin_name: str
+ _coverage_enabled: bool
+
+
+## Debugging
+
+class TWarnFn(Protocol):
+ """A callable warn() function."""
+ def __call__(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None:
+ ...
+
+
+class TDebugCtl(Protocol):
+ """A DebugControl object, or something like it."""
+
+ def should(self, option: str) -> bool:
+ """Decide whether to output debug information in category `option`."""
+
+ def write(self, msg: str) -> None:
+ """Write a line of debug output."""
+
+
+class TWritable(Protocol):
+ """Anything that can be written to."""
+
+ def write(self, msg: str) -> None:
+ """Write a message."""
diff --git a/coverage/version.py b/coverage/version.py
index 33fce3c28..c48974967 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -4,28 +4,47 @@
"""The version and URL for coverage.py"""
# This file is exec'ed in setup.py, don't import anything!
-# Same semantics as sys.version_info.
-version_info = (6, 4, 2, "final", 0)
-
-
-def _make_version(major, minor, micro, releaselevel, serial):
+from __future__ import annotations
+
+# version_info: same semantics as sys.version_info.
+# _dev: the .devN suffix if any.
+version_info = (7, 2, 7, "final", 0)
+_dev = 0
+
+
+def _make_version(
+ major: int,
+ minor: int,
+ micro: int,
+ releaselevel: str = "final",
+ serial: int = 0,
+ dev: int = 0,
+) -> str:
"""Create a readable version string from version_info tuple components."""
- assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
+ assert releaselevel in ["alpha", "beta", "candidate", "final"]
version = "%d.%d.%d" % (major, minor, micro)
- if releaselevel != 'final':
- short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
+ if releaselevel != "final":
+ short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
version += f"{short}{serial}"
+ if dev != 0:
+ version += f".dev{dev}"
return version
-def _make_url(major, minor, micro, releaselevel, serial):
+def _make_url(
+ major: int,
+ minor: int,
+ micro: int,
+ releaselevel: str,
+ serial: int = 0,
+ dev: int = 0,
+) -> str:
"""Make the URL people should start at for this version of coverage.py."""
- url = "https://coverage.readthedocs.io"
- if releaselevel != 'final':
- # For pre-releases, use a version-specific URL.
- url += "/en/" + _make_version(major, minor, micro, releaselevel, serial)
- return url
+ return (
+ "https://coverage.readthedocs.io/en/"
+ + _make_version(major, minor, micro, releaselevel, serial, dev)
+ )
-__version__ = _make_version(*version_info)
-__url__ = _make_url(*version_info)
+__version__ = _make_version(*version_info, _dev)
+__url__ = _make_url(*version_info, _dev)
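A few spot checks of the strings the reworked helpers produce, following directly from the code above:

    from coverage.version import _make_url, _make_version

    assert _make_version(7, 2, 7) == "7.2.7"
    assert _make_version(7, 3, 0, "alpha", 1) == "7.3.0a1"
    assert _make_version(7, 3, 0, "beta", 2, dev=3) == "7.3.0b2.dev3"
    assert _make_url(7, 2, 7, "final") == "https://coverage.readthedocs.io/en/7.2.7"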
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index 2c34cb546..819b4c6bc 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -3,28 +3,55 @@
"""XML reporting for coverage.py"""
+from __future__ import annotations
+
import os
import os.path
import sys
import time
import xml.dom.minidom
-from coverage import __url__, __version__, files
+from dataclasses import dataclass
+from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING
+
+from coverage import __version__, files
from coverage.misc import isolate_module, human_sorted, human_sorted_items
-from coverage.report import get_analysis_to_report
+from coverage.plugin import FileReporter
+from coverage.report_core import get_analysis_to_report
+from coverage.results import Analysis
+from coverage.types import TMorf
+from coverage.version import __url__
+
+if TYPE_CHECKING:
+ from coverage import Coverage
os = isolate_module(os)
-DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
+DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd"
-def rate(hit, num):
+def rate(hit: int, num: int) -> str:
"""Return the fraction of `hit`/`num`, as a string."""
if num == 0:
return "1"
else:
- return "%.4g" % (float(hit) / num)
+ return "%.4g" % (hit / num)
+
+
+@dataclass
+class PackageData:
+ """Data we keep about each "package" (in Java terms)."""
+ elements: Dict[str, xml.dom.minidom.Element]
+ hits: int
+ lines: int
+ br_hits: int
+ branches: int
+
+
+def appendChild(parent: Any, child: Any) -> None:
+ """Append a child to a parent, in a way mypy will shut up about."""
+ parent.appendChild(child)
class XmlReporter:
@@ -32,7 +59,7 @@ class XmlReporter:
report_type = "XML report"
- def __init__(self, coverage):
+ def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
@@ -40,13 +67,15 @@ def __init__(self, coverage):
if self.config.source:
for src in self.config.source:
if os.path.exists(src):
- if not self.config.relative_files:
+ if self.config.relative_files:
+ src = src.rstrip(r"\/")
+ else:
src = files.canonical_filename(src)
self.source_paths.add(src)
- self.packages = {}
- self.xml_out = None
+ self.packages: Dict[str, PackageData] = {}
+ self.xml_out: xml.dom.minidom.Document
- def report(self, morfs, outfile=None):
+ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
"""Generate a Cobertura-compatible XML report for `morfs`.
`morfs` is a list of modules or file names.
@@ -60,6 +89,7 @@ def report(self, morfs, outfile=None):
# Create the DOM that will store the data.
impl = xml.dom.minidom.getDOMImplementation()
+ assert impl is not None
self.xml_out = impl.createDocument(None, "coverage", None)
# Write header stuff.
@@ -81,9 +111,9 @@ def report(self, morfs, outfile=None):
# Populate the XML DOM with the source info.
for path in human_sorted(self.source_paths):
xsource = self.xml_out.createElement("source")
- xsources.appendChild(xsource)
+ appendChild(xsources, xsource)
txt = self.xml_out.createTextNode(path)
- xsource.appendChild(txt)
+ appendChild(xsource, txt)
lnum_tot, lhits_tot = 0, 0
bnum_tot, bhits_tot = 0, 0
@@ -93,26 +123,25 @@ def report(self, morfs, outfile=None):
# Populate the XML DOM with the package info.
for pkg_name, pkg_data in human_sorted_items(self.packages.items()):
- class_elts, lhits, lnum, bhits, bnum = pkg_data
xpackage = self.xml_out.createElement("package")
- xpackages.appendChild(xpackage)
+ appendChild(xpackages, xpackage)
xclasses = self.xml_out.createElement("classes")
- xpackage.appendChild(xclasses)
- for _, class_elt in human_sorted_items(class_elts.items()):
- xclasses.appendChild(class_elt)
- xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
- xpackage.setAttribute("line-rate", rate(lhits, lnum))
+ appendChild(xpackage, xclasses)
+ for _, class_elt in human_sorted_items(pkg_data.elements.items()):
+ appendChild(xclasses, class_elt)
+ xpackage.setAttribute("name", pkg_name.replace(os.sep, "."))
+ xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines))
if has_arcs:
- branch_rate = rate(bhits, bnum)
+ branch_rate = rate(pkg_data.br_hits, pkg_data.branches)
else:
branch_rate = "0"
xpackage.setAttribute("branch-rate", branch_rate)
xpackage.setAttribute("complexity", "0")
- lnum_tot += lnum
- lhits_tot += lhits
- bnum_tot += bnum
- bhits_tot += bhits
+ lhits_tot += pkg_data.hits
+ lnum_tot += pkg_data.lines
+ bhits_tot += pkg_data.br_hits
+ bnum_tot += pkg_data.branches
xcoverage.setAttribute("lines-valid", str(lnum_tot))
xcoverage.setAttribute("lines-covered", str(lhits_tot))
@@ -138,37 +167,38 @@ def report(self, morfs, outfile=None):
pct = 100.0 * (lhits_tot + bhits_tot) / denom
return pct
- def xml_file(self, fr, analysis, has_arcs):
+ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None:
"""Add to the XML report for a single file."""
if self.config.skip_empty:
if analysis.numbers.n_statements == 0:
return
- # Create the 'lines' and 'package' XML elements, which
+ # Create the "lines" and "package" XML elements, which
# are populated later. Note that a package == a directory.
filename = fr.filename.replace("\\", "/")
for source_path in self.source_paths:
- source_path = files.canonical_filename(source_path)
+ if not self.config.relative_files:
+ source_path = files.canonical_filename(source_path)
if filename.startswith(source_path.replace("\\", "/") + "/"):
rel_name = filename[len(source_path)+1:]
break
else:
- rel_name = fr.relative_filename()
+ rel_name = fr.relative_filename().replace("\\", "/")
self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
dirname = os.path.dirname(rel_name) or "."
dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
package_name = dirname.replace("/", ".")
- package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
+ package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0))
- xclass = self.xml_out.createElement("class")
+ xclass: xml.dom.minidom.Element = self.xml_out.createElement("class")
- xclass.appendChild(self.xml_out.createElement("methods"))
+ appendChild(xclass, self.xml_out.createElement("methods"))
xlines = self.xml_out.createElement("lines")
- xclass.appendChild(xlines)
+ appendChild(xclass, xlines)
xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
xclass.setAttribute("filename", rel_name.replace("\\", "/"))
@@ -177,7 +207,7 @@ def xml_file(self, fr, analysis, has_arcs):
branch_stats = analysis.branch_stats()
missing_branch_arcs = analysis.missing_branch_arcs()
- # For each statement, create an XML 'line' element.
+ # For each statement, create an XML "line" element.
for line in sorted(analysis.statements):
xline = self.xml_out.createElement("line")
xline.setAttribute("number", str(line))
@@ -197,7 +227,7 @@ def xml_file(self, fr, analysis, has_arcs):
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
xline.setAttribute("missing-branches", ",".join(annlines))
- xlines.appendChild(xline)
+ appendChild(xlines, xline)
class_lines = len(analysis.statements)
class_hits = class_lines - len(analysis.missing)
@@ -207,8 +237,8 @@ def xml_file(self, fr, analysis, has_arcs):
missing_branches = sum(t - k for t, k in branch_stats.values())
class_br_hits = class_branches - missing_branches
else:
- class_branches = 0.0
- class_br_hits = 0.0
+ class_branches = 0
+ class_br_hits = 0
# Finalize the statistics that are collected in the XML DOM.
xclass.setAttribute("line-rate", rate(class_hits, class_lines))
@@ -218,13 +248,13 @@ def xml_file(self, fr, analysis, has_arcs):
branch_rate = "0"
xclass.setAttribute("branch-rate", branch_rate)
- package[0][rel_name] = xclass
- package[1] += class_hits
- package[2] += class_lines
- package[3] += class_br_hits
- package[4] += class_branches
+ package.elements[rel_name] = xclass
+ package.hits += class_hits
+ package.lines += class_lines
+ package.br_hits += class_br_hits
+ package.branches += class_branches
-def serialize_xml(dom):
+def serialize_xml(dom: xml.dom.minidom.Document) -> str:
"""Serialize a minidom node to XML."""
return dom.toprettyxml()
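The PackageData fields replace the old positional list; a tiny illustration of how they accumulate and how rate() formats the result (the numbers are made up):

    from coverage.xmlreport import PackageData, rate

    pkg = PackageData(elements={}, hits=0, lines=0, br_hits=0, branches=0)
    for class_hits, class_lines in [(9, 10), (4, 8)]:
        pkg.hits += class_hits
        pkg.lines += class_lines

    assert rate(pkg.hits, pkg.lines) == "0.7222"   # 13/18 to four significant digits
    assert rate(0, 0) == "1"                       # an empty denominator counts as covered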
diff --git a/doc/changes.rst b/doc/changes.rst
index 42af57c74..54a3c81be 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -6,7 +6,7 @@
.. The recent changes from the top-level file:
.. include:: ../CHANGES.rst
- :end-before: endchangesinclude
+ :end-before: scriv-end-here
.. Older changes here:
@@ -383,7 +383,7 @@ Version 5.0a6 — 2019-07-16
argument, `no_disk` (default: False). Setting it to True prevents writing
any data to the disk. This is useful for transient data objects.
-- Added the classmethod :meth:`.Coverage.current` to get the latest started
+- Added the class method :meth:`.Coverage.current` to get the latest started
Coverage instance.
- Multiprocessing support in Python 3.8 was broken, but is now fixed. Closes
@@ -556,7 +556,7 @@ Version 5.0a2 — 2018-09-03
- Development moved from `Bitbucket`_ to `GitHub`_.
-- HTML files no longer have trailing and extra whitespace.
+- HTML files no longer have trailing and extra white space.
- The sort order in the HTML report is stored in local storage rather than
cookies, closing `issue 611`_. Thanks, Federico Bond.
@@ -794,7 +794,7 @@ Version 4.4b1 — 2017-04-04
also continue measurement. Both `issue 79`_ and `issue 448`_ described this
problem, and have been fixed.
-- Plugins can now find unexecuted files if they choose, by implementing the
+- Plugins can now find un-executed files if they choose, by implementing the
`find_executable_files` method. Thanks, Emil Madsen.
- Minimal IronPython support. You should be able to run IronPython programs
@@ -1202,7 +1202,7 @@ Version 4.1b2 — 2016-01-23
- The XML report now produces correct package names for modules found in
directories specified with ``source=``. Fixes `issue 465`_.
-- ``coverage report`` won't produce trailing whitespace.
+- ``coverage report`` won't produce trailing white space.
.. _issue 465: https://github.com/nedbat/coveragepy/issues/465
.. _issue 466: https://github.com/nedbat/coveragepy/issues/466
@@ -1532,7 +1532,7 @@ Version 4.0a6 — 2015-06-21
- Files with incorrect encoding declaration comments are no longer ignored by
the reporting commands, fixing `issue 351`_.
-- HTML reports now include a timestamp in the footer, closing `issue 299`_.
+- HTML reports now include a time stamp in the footer, closing `issue 299`_.
Thanks, Conrad Ho.
- HTML reports now begrudgingly use double-quotes rather than single quotes,
@@ -1685,7 +1685,7 @@ Version 4.0a2 — 2015-01-14
`issue 328`_. Thanks, Buck Evan.
- The regex for matching exclusion pragmas has been fixed to allow more kinds
- of whitespace, fixing `issue 334`_.
+ of white space, fixing `issue 334`_.
- Made some PyPy-specific tweaks to improve speed under PyPy. Thanks, Alex
Gaynor.
@@ -1739,7 +1739,7 @@ Version 4.0a1 — 2014-09-27
`issue 285`_. Thanks, Chris Rose.
- HTML reports no longer raise UnicodeDecodeError if a Python file has
- undecodable characters, fixing `issue 303`_ and `issue 331`_.
+ un-decodable characters, fixing `issue 303`_ and `issue 331`_.
- The annotate command will now annotate all files, not just ones relative to
the current directory, fixing `issue 57`_.
@@ -1791,7 +1791,7 @@ Version 3.7 — 2013-10-06
- Coverage.py properly supports .pyw files, fixing `issue 261`_.
- Omitting files within a tree specified with the ``source`` option would
- cause them to be incorrectly marked as unexecuted, as described in
+ cause them to be incorrectly marked as un-executed, as described in
`issue 218`_. This is now fixed.
- When specifying paths to alias together during data combining, you can now
@@ -1802,7 +1802,7 @@ Version 3.7 — 2013-10-06
(``build/$BUILDNUM/src``).
- Trying to create an XML report with no files to report on, would cause a
- ZeroDivideError, but no longer does, fixing `issue 250`_.
+ ZeroDivisionError, but no longer does, fixing `issue 250`_.
- When running a threaded program under the Python tracer, coverage.py no
longer issues a spurious warning about the trace function changing: "Trace
@@ -1905,7 +1905,7 @@ Version 3.6b1 — 2012-11-28
Thanks, Marcus Cobden.
- Coverage percentage metrics are now computed slightly differently under
- branch coverage. This means that completely unexecuted files will now
+ branch coverage. This means that completely un-executed files will now
correctly have 0% coverage, fixing `issue 156`_. This also means that your
total coverage numbers will generally now be lower if you are measuring
branch coverage.
@@ -2068,7 +2068,7 @@ Version 3.5.2b1 — 2012-04-29
- Now the exit status of your product code is properly used as the process
status when running ``python -m coverage run ...``. Thanks, JT Olds.
-- When installing into pypy, we no longer attempt (and fail) to compile
+- When installing into PyPy, we no longer attempt (and fail) to compile
the C tracer function, closing `issue 166`_.
.. _issue 142: https://github.com/nedbat/coveragepy/issues/142
@@ -2234,9 +2234,10 @@ Version 3.4 — 2010-09-19
Version 3.4b2 — 2010-09-06
--------------------------
-- Completely unexecuted files can now be included in coverage results, reported
- as 0% covered. This only happens if the --source option is specified, since
- coverage.py needs guidance about where to look for source files.
+- Completely un-executed files can now be included in coverage results,
+ reported as 0% covered. This only happens if the --source option is
+ specified, since coverage.py needs guidance about where to look for source
+ files.
- The XML report output now properly includes a percentage for branch coverage,
fixing `issue 65`_ and `issue 81`_.
@@ -2374,7 +2375,7 @@ Version 3.3 — 2010-02-24
`config_file=False`.
- Fixed a problem with nested loops having their branch possibilities
- mischaracterized: `issue 39`_.
+ mis-characterized: `issue 39`_.
- Added coverage.process_start to enable coverage measurement when Python
starts.
diff --git a/doc/cmd.rst b/doc/cmd.rst
index e2a60fc20..7db6746a8 100644
--- a/doc/cmd.rst
+++ b/doc/cmd.rst
@@ -6,6 +6,12 @@
Running "make prebuild" will bring it up to date.
.. [[[cog
+ # optparse wraps help to the COLUMNS value. Set it here to be sure it's
+ # consistent regardless of the environment. Has to be set before we
+ # import cmdline.py, which creates the optparse objects.
+ import os
+ os.environ["COLUMNS"] = "80"
+
import contextlib
import io
import re
@@ -342,7 +348,7 @@ single directory, and use the **combine** command to combine them into one
$ coverage combine
-You can also name directories or files on the command line::
+You can also name directories or files to be combined on the command line::
$ coverage combine data1.dat windows_data_files/
@@ -364,19 +370,6 @@ An existing combined data file is ignored and re-written. If you want to use
runs, use the ``--append`` switch on the **combine** command. This behavior
was the default before version 4.2.
-To combine data for a source file, coverage has to find its data in each of the
-data files. Different test runs may run the same source file from different
-locations. For example, different operating systems will use different paths
-for the same file, or perhaps each Python version is run from a different
-subdirectory. Coverage needs to know that different file paths are actually
-the same source file for reporting purposes.
-
-You can tell coverage.py how different source locations relate with a
-``[paths]`` section in your configuration file (see :ref:`config_paths`).
-It might be more convenient to use the ``[run] relative_files``
-setting to store relative file paths (see :ref:`relative_files
-<config_run_relative_files>`).
-
If any of the data files can't be read, coverage.py will print a warning
indicating the file and the problem.
@@ -389,11 +382,10 @@ want to keep those files, use the ``--keep`` command-line option.
$ coverage combine --help
Usage: coverage combine [options] ...
- Combine data from multiple coverage files collected with 'run -p'. The
- combined results are written to a single file representing the union of the
- data. The positional arguments are data files or directories containing data
- files. If no paths are provided, data files in the default data file's
- directory are combined.
+ Combine data from multiple coverage files. The combined results are written to
+ a single file representing the union of the data. The positional arguments are
+ data files or directories containing data files. If no paths are provided,
+ data files in the default data file's directory are combined.
Options:
-a, --append Append coverage data to .coverage, otherwise it starts
@@ -409,7 +401,29 @@ want to keep those files, use the ``--keep`` command-line option.
--rcfile=RCFILE Specify configuration file. By default '.coveragerc',
'setup.cfg', 'tox.ini', and 'pyproject.toml' are
tried. [env: COVERAGE_RCFILE]
-.. [[[end]]] (checksum: 0ac91b0781d7146b87953f09090dab92)
+.. [[[end]]] (checksum: 0bdd83f647ee76363c955bedd9ddf749)
+
+
+.. _cmd_combine_remapping:
+
+Re-mapping paths
+................
+
+To combine data for a source file, coverage has to find its data in each of the
+data files. Different test runs may run the same source file from different
+locations. For example, different operating systems will use different paths
+for the same file, or perhaps each Python version is run from a different
+subdirectory. Coverage needs to know that different file paths are actually
+the same source file for reporting purposes.
+
+You can tell coverage.py how different source locations relate with a
+``[paths]`` section in your configuration file (see :ref:`config_paths`).
+It might be more convenient to use the ``[run] relative_files``
+setting to store relative file paths (see :ref:`relative_files
+<config_run_relative_files>`).
+
+If data isn't combining properly, you can see details about the inner workings
+with ``--debug=pathmap``.
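An illustrative ``[paths]`` section for the re-mapping described above (the directory names are made up); the first entry is the canonical location, and paths matching the later entries are folded onto it when data is combined::

    [paths]
    source =
        src/
        /home/ci/project/src
        C:\runner\project\src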
.. _cmd_erase:
@@ -510,6 +524,8 @@ as a percentage.
file. Defaults to '.coverage'. [env: COVERAGE_FILE]
--fail-under=MIN Exit with a status of 2 if the total coverage is less
than MIN.
+ --format=FORMAT Output format, either text (default), markdown, or
+ total.
-i, --ignore-errors Ignore errors while reading source files.
--include=PAT1,PAT2,...
Include only files whose paths match one of these
@@ -532,7 +548,7 @@ as a percentage.
--rcfile=RCFILE Specify configuration file. By default '.coveragerc',
'setup.cfg', 'tox.ini', and 'pyproject.toml' are
tried. [env: COVERAGE_RCFILE]
-.. [[[end]]] (checksum: 2f8dde61bab2f44fbfe837aeae87dfd2)
+.. [[[end]]] (checksum: 167272a29d9e7eb017a592a0e0747a06)
The ``-m`` flag also shows the line numbers of missing statements::
@@ -583,6 +599,12 @@ decimal point in coverage percentages, defaulting to none.
The ``--sort`` option is the name of a column to sort the report by.
+The ``--format`` option controls the style of the report. ``--format=text``
+creates plain text tables as shown above. ``--format=markdown`` creates
+Markdown tables. ``--format=total`` writes out a single number, the total
+coverage percentage as shown at the end of the tables, but without a percent
+sign.
+
Other common reporting options are described above in :ref:`cmd_reporting`.
These options can also be set in your .coveragerc file. See
:ref:`Configuration: [report] <config_report>`.
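As an example of the ``--format`` option described above, a CI step that only needs the bare percentage can ask for the ``total`` format (the value shown is illustrative)::

    $ coverage report --format=total
    79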
@@ -602,9 +624,10 @@ Here's a `sample report`__.
__ https://nedbatchelder.com/files/sample_coverage_html/index.html
-Lines are highlighted green for executed, red for missing, and gray for
-excluded. The counts at the top of the file are buttons to turn on and off
-the highlighting.
+Lines are highlighted: green for executed, red for missing, and gray for
+excluded. If you've used branch coverage, partial branches are yellow. The
+colored counts at the top of the file are buttons to turn on and off the
+highlighting.
A number of keyboard shortcuts are available for navigating the report.
Click the keyboard icon in the upper right to see the complete list.
@@ -1000,13 +1023,17 @@ of operation to log:
* ``multiproc``: log the start and stop of multiprocessing processes.
+* ``pathmap``: log the remapping of paths that happens during ``coverage
+ combine``. See :ref:`config_paths`.
+
* ``pid``: annotate all warnings and debug output with the process and thread
ids.
* ``plugin``: print information about plugin operations.
* ``process``: show process creation information, and changes in the current
- directory.
+ directory. This also writes a time stamp and command arguments into the data
+ file.
* ``pybehave``: show the values of `internal flags `_ describing the
behavior of the current version of Python.
@@ -1030,7 +1057,9 @@ Debug options can also be set with the ``COVERAGE_DEBUG`` environment variable,
a comma-separated list of these options, or in the :ref:`config_run_debug`
section of the .coveragerc file.
-The debug output goes to stderr, unless the ``COVERAGE_DEBUG_FILE`` environment
-variable names a different file, which will be appended to.
-``COVERAGE_DEBUG_FILE`` accepts the special names ``stdout`` and ``stderr`` to
-write to those destinations.
+The debug output goes to stderr, unless the :ref:`config_run_debug_file`
+setting or the ``COVERAGE_DEBUG_FILE`` environment variable names a different
+file, which will be appended to. This can be useful because many test runners
+capture output, which could hide important details. ``COVERAGE_DEBUG_FILE``
+accepts the special names ``stdout`` and ``stderr`` to write to those
+destinations.
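For example, to capture path re-mapping details in a file while combining (the option names come from the list above; the file path is illustrative)::

    $ export COVERAGE_DEBUG=pathmap,pid
    $ export COVERAGE_DEBUG_FILE=/tmp/coverage-debug.txt
    $ coverage combine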
diff --git a/doc/conf.py b/doc/conf.py
index f671ef4e3..bee8c14b2 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -36,13 +36,14 @@
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
- 'sphinxcontrib.spelling',
'sphinx.ext.intersphinx',
'sphinxcontrib.restbuilder',
'sphinx.ext.napoleon',
- 'sphinx_tabs.tabs',
+ #'sphinx_tabs.tabs',
]
+autodoc_typehints = "description"
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -57,18 +58,20 @@
# General information about the project.
project = 'Coverage.py'
-copyright = '2009\N{EN DASH}2022, Ned Batchelder' # CHANGEME # pylint: disable=redefined-builtin
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
-#
-# The short X.Y.Z version. # CHANGEME
-version = "6.4.2"
-# The full version, including alpha/beta/rc tags. # CHANGEME
-release = "6.4.2"
-# The date of release, in "monthname day, year" format. # CHANGEME
-release_date = "July 12, 2022"
+
+# @@@ editable
+copyright = "2009–2023, Ned Batchelder" # pylint: disable=redefined-builtin
+# The short X.Y.Z version.
+version = "7.2.7"
+# The full version, including alpha/beta/rc tags.
+release = "7.2.7"
+# The date of release, in "monthname day, year" format.
+release_date = "May 29, 2023"
+# @@@ end
rst_epilog = """
.. |release_date| replace:: {release_date}
@@ -121,6 +124,19 @@
'python': ('https://docs.python.org/3', None),
}
+nitpick_ignore = [
+ ("py:class", "frame"),
+ ("py:class", "module"),
+ ("py:class", "DefaultValue"),
+ ("py:class", "FilePath"),
+ ("py:class", "TWarnFn"),
+ ("py:class", "TDebugCtl"),
+]
+
+nitpick_ignore_regex = [
+ (r"py:class", r"coverage\..*\..*"),
+]
+
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
@@ -203,6 +219,9 @@
# -- Spelling ---
if any("spell" in arg for arg in sys.argv):
+ # sphinxcontrib.spelling needs the native "enchant" library, which often is
+ # missing, so only use the extension if we are specifically spell-checking.
+ extensions += ['sphinxcontrib.spelling']
names_file = tempfile.NamedTemporaryFile(mode='w', prefix="coverage_names_", suffix=".txt")
with open("../CONTRIBUTORS.txt") as contributors:
names = set(re.split(r"[^\w']", contributors.read()))
diff --git a/doc/config.rst b/doc/config.rst
index 70f56c0e5..0100d89e1 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -7,6 +7,8 @@
Configuration reference
=======================
+.. highlight:: ini
+
Coverage.py options can be specified in a configuration file. This makes it
easier to re-run coverage.py with consistent settings, and also allows for
specification of options that are otherwise only available in the
@@ -29,10 +31,14 @@ Coverage.py will read settings from other usual configuration files if no other
configuration file is used. It will automatically read from "setup.cfg" or
"tox.ini" if they exist. In this case, the section names have "coverage:"
prefixed, so the ``[run]`` options described below will be found in the
-``[coverage:run]`` section of the file. If coverage.py is installed with the
-``toml`` extra (``pip install coverage[toml]``), it will automatically read
-from "pyproject.toml". Configuration must be within the ``[tool.coverage]``
-section, for example, ``[tool.coverage.run]``.
+``[coverage:run]`` section of the file.
+
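+For example, the ``[run]`` options described below would go in a
+``[coverage:run]`` section (the ``branch`` setting is just an illustration)::
+
+    [coverage:run]
+    branch = True
+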
+Coverage.py will read from "pyproject.toml" if TOML support is available,
+either because you are running on Python 3.11 or later, or because you
+installed with the ``toml`` extra (``pip install coverage[toml]``).
+Configuration must be within the ``[tool.coverage]`` section, for example,
+``[tool.coverage.run]``. Environment variable expansion in values is
+available, but only within quoted strings, even for non-string values.
Syntax
@@ -73,10 +79,7 @@ Here's a sample configuration file::
[report]
# Regexes for lines to exclude from consideration
- exclude_lines =
- # Have to re-enable the standard pragma
- pragma: no cover
-
+ exclude_also =
# Don't complain about missing debug-only code:
def __repr__
if self\.debug
@@ -197,6 +200,15 @@ include a short string at the end, the name of the warning. See
` for details.
+.. _config_run_debug_file:
+
+[run] debug_file
+................
+
+(string) A file name to write debug output to. See :ref:`the run --debug
+option ` for details.
+
+
.. _config_run_dynamic_context:
[run] dynamic_context
@@ -216,14 +228,6 @@ measurement or reporting. Ignored if ``source`` is set. See :ref:`source` for
details.
-.. _config_run_note:
-
-[run] note
-..........
-
-(string) This is now obsolete.
-
-
.. _config_run_omit:
[run] omit
@@ -257,9 +261,9 @@ information.
[run] relative_files
....................
-(*experimental*, boolean, default False) store relative file paths in the data
-file. This makes it easier to measure code in one (or multiple) environments,
-and then report in another. See :ref:`cmd_combine` for details.
+(boolean, default False) store relative file paths in the data file. This
+makes it easier to measure code in one (or multiple) environments, and then
+report in another. See :ref:`cmd_combine` for details.
Note that setting ``source`` has to be done in the configuration file rather
than the command line for this option to work, since the reporting commands
@@ -346,9 +350,18 @@ combined with data for "c:\\myproj\\src\\module.py", and will be reported
against the source file found at "src/module.py".
If you specify more than one list of paths, they will be considered in order.
-The first list that has a match will be used.
+A file path will only be remapped if the result exists. If a path matches a
+list, but the result doesn't exist, the next list will be tried. The first
+list that has an existing result will be used.
+
+Remapping will also be done during reporting, but only within the single data
+file being reported. Combining multiple files requires the ``combine``
+command.
-See :ref:`cmd_combine` for more information.
+The ``--debug=pathmap`` option can be used to log details of the re-mapping of
+paths. See :ref:`the --debug option `.
+
+See :ref:`cmd_combine_remapping` and :ref:`source_glob` for more information.
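+
+For example (the directory names here are only illustrative), a ``[paths]``
+section that maps a Windows checkout back to a local ``src/`` tree could look
+like this::
+
+    [paths]
+    # The first entry is the canonical location; matches of the other
+    # entries are rewritten to it.
+    source =
+        src/
+        c:\myproj\src
+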
.. _config_report:
@@ -359,16 +372,31 @@ See :ref:`cmd_combine` for more information.
Settings common to many kinds of reporting.
+.. _config_report_exclude_also:
+
+[report] exclude_also
+.....................
+
+(multi-string) A list of regular expressions. This setting is similar to
+:ref:`config_report_exclude_lines`: it specifies patterns for lines to exclude
+from reporting. This setting is preferred, because it will preserve the
+default exclude patterns instead of overwriting them.
+
+.. versionadded:: 7.2.0
+
+
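+For example, a short sketch that adds project-specific patterns while keeping
+the built-in defaults such as ``pragma: no cover``::
+
+    [report]
+    # These regexes are added to the default exclusions, not replacing them.
+    exclude_also =
+        def __repr__
+        raise NotImplementedError
+
+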
.. _config_report_exclude_lines:
[report] exclude_lines
......................
(multi-string) A list of regular expressions. Any line of your source code
-containing a match for one of these regexes is excluded from being reported as
+containing a match for one of these regexes is excluded from being reported as
missing. More details are in :ref:`excluding`. If you use this option, you
are replacing all the exclude regexes, so you'll need to also supply the
-"pragma: no cover" regex if you still want to use it.
+"pragma: no cover" regex if you still want to use it. The
+:ref:`config_report_exclude_also` setting can be used to specify patterns
+without overwriting the default set.
You can exclude lines introducing blocks, and the entire block is excluded. If
you exclude a ``def`` line or decorator line, the entire function is excluded.
@@ -384,7 +412,7 @@ you'll exclude any line with three or more of any character. If you write
[report] fail_under
...................
-(float) A target coverage percentage. If the total coverage measurement is
+(float) A target coverage percentage. If the total coverage measurement is
under this value, then exit with a status code of 2. If you specify a
non-integral value, you must also set ``[report] precision`` properly to make
use of the decimal places. A setting of 100 will fail any value under 100,
@@ -409,6 +437,20 @@ warning instead of an exception.
See :ref:`source` for details.
+.. _config_include_namespace_packages:
+
+[report] include_namespace_packages
+...................................
+
+(boolean, default False) When searching for completely un-executed files,
+include directories without ``__init__.py`` files. These are `implicit
+namespace packages`_, and are usually skipped.
+
+.. _implicit namespace packages: https://peps.python.org/pep-0420/
+
+.. versionadded:: 7.0
+
+
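+For example, to enable it::
+
+    [report]
+    # Also search directories that have no __init__.py file.
+    include_namespace_packages = True
+
+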
.. _config_report_omit:
[report] omit
@@ -598,7 +640,7 @@ section also apply to JSON output, where appropriate.
[json] pretty_print
...................
-(boolean, default false) Controls if the JSON is outputted with whitespace
+(boolean, default false) Controls whether the JSON is output with white space
formatted for human consumption (True) or for minimum file size (False).
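+
+For example::
+
+    [json]
+    pretty_print = True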
diff --git a/doc/contexts.rst b/doc/contexts.rst
index 1ea45d46e..fbf940405 100644
--- a/doc/contexts.rst
+++ b/doc/contexts.rst
@@ -60,6 +60,8 @@ There are three ways to enable dynamic contexts:
.. _pytest-cov: https://pypi.org/project/pytest-cov/
+.. highlight:: ini
+
The ``[run] dynamic_context`` setting has only one option now. Set it to
``test_function`` to start a new dynamic context for every test function::
diff --git a/doc/contributing.rst b/doc/contributing.rst
index 921c03712..fa7bb9f0c 100644
--- a/doc/contributing.rst
+++ b/doc/contributing.rst
@@ -37,24 +37,24 @@ The coverage.py code is hosted on a GitHub repository at
https://github.com/nedbat/coveragepy. To get a working environment, follow
these steps:
-.. minimum of PYVERSIONS:
+#. `Fork the repo`_ into your own GitHub account. The coverage.py code will
+ then be copied into a GitHub repository at
+ ``https://github.com/GITHUB_USER/coveragepy`` where GITHUB_USER is your
+ GitHub username.
-#. (Optional, but recommended) Create a Python 3.7 virtualenv to work in,
- and activate it.
+#. (Optional) Create a virtualenv to work in, and activate it. There
+ are a number of ways to do this. Use the method you are comfortable with.
#. Clone the repository::
- $ git clone https://github.com/nedbat/coveragepy
+ $ git clone https://github.com/GITHUB_USER/coveragepy
$ cd coveragepy
#. Install the requirements::
- $ pip install -r requirements/dev.pip
+ $ python3 -m pip install -r requirements/dev.in
-#. Install a number of versions of Python. Coverage.py supports a range
- of Python versions. The more you can test with, the more easily your code
- can be used as-is. If you only have one version, that's OK too, but may
- mean more work integrating your contribution.
+ Note: You may need to upgrade pip to install the requirements.
Running the tests
@@ -63,73 +63,109 @@ Running the tests
The tests are written mostly as standard unittest-style tests, and are run with
pytest running under `tox`_::
- $ tox
- py37 create: /Users/nedbat/coverage/trunk/.tox/py37
- py37 installdeps: -rrequirements/pip.pip, -rrequirements/pytest.pip, eventlet==0.25.1, greenlet==0.4.15
- py37 develop-inst: /Users/nedbat/coverage/trunk
- py37 installed: apipkg==1.5,appdirs==1.4.4,attrs==20.3.0,backports.functools-lru-cache==1.6.4,-e git+git@github.com:nedbat/coveragepy.git@36ef0e03c0439159c2245d38de70734fa08cddb4#egg=coverage,decorator==5.0.7,distlib==0.3.1,dnspython==2.1.0,eventlet==0.25.1,execnet==1.8.0,filelock==3.0.12,flaky==3.7.0,future==0.18.2,greenlet==0.4.15,hypothesis==6.10.1,importlib-metadata==4.0.1,iniconfig==1.1.1,monotonic==1.6,packaging==20.9,pluggy==0.13.1,py==1.10.0,PyContracts @ git+https://github.com/slorg1/contracts@c5a6da27d4dc9985f68e574d20d86000880919c3,pyparsing==2.4.7,pytest==6.2.3,pytest-forked==1.3.0,pytest-xdist==2.2.1,qualname==0.1.0,six==1.15.0,sortedcontainers==2.3.0,toml==0.10.2,typing-extensions==3.10.0.0,virtualenv==20.4.4,zipp==3.4.1
- py37 run-test-pre: PYTHONHASHSEED='376882681'
- py37 run-test: commands[0] | python setup.py --quiet clean develop
- py37 run-test: commands[1] | python igor.py zip_mods remove_extension
- py37 run-test: commands[2] | python igor.py test_with_tracer py
- === CPython 3.7.10 with Python tracer (.tox/py37/bin/python) ===
+ % python3 -m tox
+ ROOT: tox-gh won't override envlist because tox is not running in GitHub Actions
+ .pkg: _optional_hooks> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ .pkg: get_requires_for_build_editable> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ .pkg: build_editable> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ py37: install_package> python -m pip install -U --force-reinstall --no-deps .tox/.tmp/package/87/coverage-7.2.3a0.dev1-0.editable-cp37-cp37m-macosx_10_15_x86_64.whl
+ py37: commands[0]> python igor.py zip_mods
+ py37: commands[1]> python setup.py --quiet build_ext --inplace
+ py37: commands[2]> python -m pip install -q -e .
+ py37: commands[3]> python igor.py test_with_tracer c
+ === CPython 3.7.15 with C tracer (.tox/py37/bin/python) ===
bringing up nodes...
- ........................................................................................................................................................... [ 15%]
- ........................................................................................................................................................... [ 31%]
- ...........................................................................................................................................s............... [ 47%]
- ...........................................s...................................................................................sss.sssssssssssssssssss..... [ 63%]
- ........................................................................................................................................................s.. [ 79%]
- ......................................s..................................s................................................................................. [ 95%]
- ........................................ss...... [100%]
- 949 passed, 29 skipped in 40.56s
- py37 run-test: commands[3] | python setup.py --quiet build_ext --inplace
- py37 run-test: commands[4] | python igor.py test_with_tracer c
- === CPython 3.7.10 with C tracer (.tox/py37/bin/python) ===
+ .........................................................................................................................x.................s....s....... [ 11%]
+ ..s.....x.............................................s................................................................................................. [ 22%]
+ ........................................................................................................................................................ [ 34%]
+ ........................................................................................................................................................ [ 45%]
+ ........................................................................................................................................................ [ 57%]
+ .........s....................................................................................................................s......................... [ 68%]
+ .................................s..............................s...............s..................................s.................................... [ 80%]
+ ........................................................s............................................................................................... [ 91%]
+ ......................................s......................................................................... [100%]
+ 1316 passed, 12 skipped, 2 xfailed in 36.42s
+ py37: commands[4]> python igor.py remove_extension
+ py37: commands[5]> python igor.py test_with_tracer py
+ === CPython 3.7.15 with Python tracer (.tox/py37/bin/python) ===
bringing up nodes...
- ........................................................................................................................................................... [ 15%]
- ........................................................................................................................................................... [ 31%]
- ......................................................................s.................................................................................... [ 47%]
- ........................................................................................................................................................... [ 63%]
- ..........................s................................................s............................................................................... [ 79%]
- .................................................................................s......................................................................... [ 95%]
- ......................................s......... [100%]
- 973 passed, 5 skipped in 41.36s
- ____________________________________________________________________________ summary _____________________________________________________________________________
- py37: commands succeeded
- congratulations :)
+ ................................................................................................x...........................x.................s......... [ 11%]
+ .....s.............s.s.....................................................s..............ss............................s.ss....ss.ss................... [ 22%]
+ ......................................................................................................................................s................. [ 34%]
+ ..................................................................................................................s..................................... [ 45%]
+ ...................s.ss.....................................................................................s....................s.ss................... [ 57%]
+ ..................s.s................................................................................................................................... [ 68%]
+ ..........................s.........................................ssss...............s.................s...sss..................s...ss...ssss.s....... [ 80%]
+ .......................................................................................................................................................s [ 91%]
+ .........................................................................s.................................ss.... [100%]
+ 1281 passed, 47 skipped, 2 xfailed in 33.86s
+ .pkg: _exit> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ py37: OK (82.38=setup[2.80]+cmd[0.20,0.35,7.30,37.20,0.21,34.32] seconds)
+ congratulations :) (83.61 seconds)
Tox runs the complete test suite twice for each version of Python you have
-installed. The first run uses the Python implementation of the trace function,
-the second uses the C implementation.
+installed. The first run uses the C implementation of the trace function,
+the second uses the Python implementation.
To limit tox to just a few versions of Python, use the ``-e`` switch::
- $ tox -e py37,py39
-
-To run just a few tests, you can use `pytest test selectors`_::
-
- $ tox tests/test_misc.py
- $ tox tests/test_misc.py::HasherTest
- $ tox tests/test_misc.py::HasherTest::test_string_hashing
-
-These command run the tests in one file, one class, and just one test,
-respectively.
+ $ python3 -m tox -e py37,py39
+
+On the tox command line, options after ``--`` are passed to pytest. To run
+just a few tests, you can use `pytest test selectors`_::
+
+ $ python3 -m tox -- tests/test_misc.py
+ $ python3 -m tox -- tests/test_misc.py::HasherTest
+ $ python3 -m tox -- tests/test_misc.py::HasherTest::test_string_hashing
+
+These commands run the tests in one file, one class, and just one test,
+respectively. The pytest ``-k`` option selects tests based on a word in their
+name, which can be very convenient for ad-hoc test selection. Of course you
+can combine tox and pytest options::
+
+ $ python3 -m tox -q -e py37 -- -n 0 -vv -k hash
+ === CPython 3.7.15 with C tracer (.tox/py37/bin/python) ===
+ ======================================= test session starts ========================================
+ platform darwin -- Python 3.7.15, pytest-7.2.2, pluggy-1.0.0 -- /Users/nedbat/coverage/.tox/py37/bin/python
+ cachedir: .tox/py37/.pytest_cache
+ rootdir: /Users/nedbat/coverage, configfile: setup.cfg
+ plugins: flaky-3.7.0, hypothesis-6.70.0, xdist-3.2.1
+ collected 1330 items / 1320 deselected / 10 selected
+ run-last-failure: no previously failed tests, not deselecting items.
+
+ tests/test_data.py::CoverageDataTest::test_add_to_hash_with_lines PASSED [ 10%]
+ tests/test_data.py::CoverageDataTest::test_add_to_hash_with_arcs PASSED [ 20%]
+ tests/test_data.py::CoverageDataTest::test_add_to_lines_hash_with_missing_file PASSED [ 30%]
+ tests/test_data.py::CoverageDataTest::test_add_to_arcs_hash_with_missing_file PASSED [ 40%]
+ tests/test_execfile.py::RunPycFileTest::test_running_hashed_pyc PASSED [ 50%]
+ tests/test_misc.py::HasherTest::test_string_hashing PASSED [ 60%]
+ tests/test_misc.py::HasherTest::test_bytes_hashing PASSED [ 70%]
+ tests/test_misc.py::HasherTest::test_unicode_hashing PASSED [ 80%]
+ tests/test_misc.py::HasherTest::test_dict_hashing PASSED [ 90%]
+ tests/test_misc.py::HasherTest::test_dict_collision PASSED [100%]
+
+ =============================== 10 passed, 1320 deselected in 1.88s ================================
+ Skipping tests with Python tracer: Only one tracer: no Python tracer for CPython
+ py37: OK (12.22=setup[2.19]+cmd[0.20,0.36,6.57,2.51,0.20,0.19] seconds)
+ congratulations :) (13.10 seconds)
You can also affect the test runs with environment variables. Define any of
these as 1 to use them:
-- COVERAGE_NO_PYTRACER: disables the Python tracer if you only want to run the
- CTracer tests.
+- ``COVERAGE_NO_PYTRACER=1`` disables the Python tracer if you only want to
+ run the CTracer tests.
-- COVERAGE_NO_CTRACER: disables the C tracer if you only want to run the
+- ``COVERAGE_NO_CTRACER=1`` disables the C tracer if you only want to run the
PyTracer tests.
-- COVERAGE_ONE_TRACER: only use one tracer for each Python version. This will
- use the C tracer if it is available, or the Python tracer if not.
+- ``COVERAGE_ONE_TRACER=1`` will use only one tracer for each Python version.
+ This will use the C tracer if it is available, or the Python tracer if not.
-- COVERAGE_AST_DUMP: will dump the AST tree as it is being used during code
- parsing.
+- ``COVERAGE_AST_DUMP=1`` will dump the AST tree as it is being used during
+ code parsing.
+There are other environment variables that affect tests. I use `set_env.py`_
+as a simple terminal interface to see and set them.
Of course, run all the tests on every version of Python you have, before
submitting a change.
@@ -140,25 +176,56 @@ submitting a change.
Lint, etc
---------
-I try to keep the coverage.py as clean as possible. I use pylint to alert me
-to possible problems::
+I try to keep the coverage.py source as clean as possible. I use pylint to
+alert me to possible problems::
$ make lint
- pylint coverage setup.py tests
- python -m tabnanny coverage setup.py tests
- python igor.py check_eol
The source is pylint-clean, even if it's because there are pragmas quieting
some warnings. Please try to keep it that way, but don't let pylint warnings
keep you from sending patches. I can clean them up.
Lines should be kept to a 100-character maximum length. I recommend an
-`editorconfig.org`_ plugin for your editor of choice.
+`editorconfig.org`_ plugin for your editor of choice, which will also help with
+indentation, line endings and so on.
Other style questions are best answered by looking at the existing code.
Formatting of docstrings, comments, long lines, and so on, should match the
code that already exists.
+Many people love `black`_, but I would prefer not to run it on coverage.py.
+
+
+Continuous integration
+----------------------
+
+When you make a pull request, `GitHub actions`__ will run all of the tests and
+quality checks on your changes. If any fail, either fix them or ask for help.
+
+__ https://github.com/nedbat/coveragepy/actions
+
+
+Dependencies
+------------
+
+Coverage.py has no direct runtime dependencies, and I would like to keep it
+that way.
+
+It has many development dependencies. These are specified generically in the
+``requirements/*.in`` files. The .in files should have no versions specified
+in them. The specific versions to use are pinned in ``requirements/*.pip``
+files. These are created by running ``make upgrade``.
+
+.. minimum of PYVERSIONS:
+
+It's important to use Python 3.7 to run ``make upgrade`` so that the pinned
+versions will work on all of the Python versions currently supported by
+coverage.py.
+
+If for some reason we need to constrain a version of a dependency, the
+constraint should be specified in the ``requirements/pins.pip`` file, with a
+detailed reason for the pin.
+
Coverage testing coverage.py
----------------------------
@@ -180,6 +247,12 @@ When you are ready to contribute a change, any way you can get it to me is
probably fine. A pull request on GitHub is great, but a simple diff or
patch works too.
+All contributions are expected to include tests for new functionality and
+fixes. If you need help writing tests, please ask.
+
+.. _fork the repo: https://docs.github.com/en/get-started/quickstart/fork-a-repo
.. _editorconfig.org: http://editorconfig.org
.. _tox: https://tox.readthedocs.io/
+.. _black: https://pypi.org/project/black/
+.. _set_env.py: https://nedbatchelder.com/blog/201907/set_envpy.html
diff --git a/doc/dbschema.rst b/doc/dbschema.rst
index 9b5ee8837..b576acaaf 100644
--- a/doc/dbschema.rst
+++ b/doc/dbschema.rst
@@ -19,8 +19,9 @@ be preferred to accessing the database directly. Only advanced uses will need
to use the database.
The schema can change without changing the major version of coverage.py, so be
-careful when accessing the database directly. The `coverage_schema` table has
-the schema number of the database. The schema described here corresponds to:
+careful when accessing the database directly. The ``coverage_schema`` table
+has the schema number of the database. The schema described here corresponds
+to:
.. [[[cog
from coverage.sqldata import SCHEMA_VERSION
@@ -49,11 +50,11 @@ This is the database schema:
.. [[[cog
import textwrap
from coverage.sqldata import SCHEMA
- print(".. code::")
+ print(".. code-block:: sql")
print()
print(textwrap.indent(SCHEMA, " "))
.. ]]]
-.. code::
+.. code-block:: sql
CREATE TABLE coverage_schema (
-- One row, to record the version of the schema in this db.
@@ -65,7 +66,7 @@ This is the database schema:
key text,
value text,
unique (key)
- -- Keys:
+ -- Possible keys:
-- 'has_arcs' boolean -- Is this data recording branches?
-- 'sys_argv' text -- The coverage command line that recorded the data.
-- 'version' text -- The version of coverage.py that made the file.
@@ -115,7 +116,7 @@ This is the database schema:
foreign key (file_id) references file (id)
);
-.. [[[end]]] (checksum: 207fbab355481686e0dce0a9d99d173c)
+.. [[[end]]] (checksum: 6a04d14b07f08f86cccf43056328dcb7)
.. _numbits:
diff --git a/doc/dict.txt b/doc/dict.txt
index 2c713fe70..41d8c94f4 100644
--- a/doc/dict.txt
+++ b/doc/dict.txt
@@ -1,18 +1,36 @@
+API
+BOM
+BTW
+CPython
+CTracer
+Cobertura
+Consolas
+Cython
+DOCTYPE
+DOM
+HTML
+Jinja
+Mako
+OK
+PYTHONPATH
+TODO
+Tidelift
+URL
+UTF
+XML
activestate
-api
apache
-API
+api
args
argv
ascii
+async
basename
basenames
bitbucket
-BOM
bom
boolean
booleans
-BTW
btw
builtin
builtins
@@ -27,7 +45,6 @@ canonicalizes
chdir'd
clickable
cmdline
-Cobertura
codecs
colorsys
combinable
@@ -38,17 +55,16 @@ configurability
configurability's
configurer
configurers
-Consolas
cov
coveragepy
coveragerc
covhtml
-CPython
css
-CTracer
-Cython
+dataio
datetime
deallocating
+debounce
+decodable
dedent
defaultdict
deserialize
@@ -62,8 +78,6 @@ docstring
docstrings
doctest
doctests
-DOCTYPE
-DOM
encodable
encodings
endfor
@@ -75,6 +89,7 @@ exec'ing
execfile
executability
executable's
+execv
expr
extensibility
favicon
@@ -96,10 +111,10 @@ github
gitignore
globals
greenlet
+hintedness
hotkey
hotkeys
html
-HTML
htmlcov
http
https
@@ -111,15 +126,13 @@ ints
invariants
iterable
iterables
-Jinja
-jquery
jQuery
+jquery
json
jython
kwargs
lcov
localStorage
-Mako
manylinux
matcher
matchers
@@ -136,8 +149,10 @@ monospaced
morf
morfs
multi
+multiproc
mumbo
mycode
+mypy
namespace
namespaces
nano
@@ -145,13 +160,14 @@ nbsp
ned
nedbat
nedbatchelder
+newb
+nocover
nosetests
nullary
num
numbits
numpy
ok
-OK
opcode
opcodes
optparse
@@ -161,13 +177,15 @@ overridable
parallelizing
parsable
parsers
+pathlib
pathnames
plugin
plugins
pragma
-pragmas
pragma'd
+pragmas
pre
+premain
prepended
prepending
programmability
@@ -175,17 +193,19 @@ programmatically
py
py's
pyc
+pyenv
pyexpat
+pylib
pylint
pyproject
pypy
pytest
pythonpath
-PYTHONPATH
pyw
rcfile
readme
readthedocs
+realpath
recordable
refactored
refactoring
@@ -194,9 +214,11 @@ regex
regexes
reimplemented
renderer
+rootname
runnable
runtime
scrollbar
+septatrix
serializable
settrace
setuptools
@@ -217,13 +239,10 @@ symlink
symlinks
syntaxes
sys
-templite
templating
+templite
testability
-Tidelift
-timestamp
todo
-TODO
tokenization
tokenize
tokenized
@@ -241,7 +260,6 @@ txt
ubuntu
undecodable
unexecutable
-unexecuted
unicode
uninstall
unittest
@@ -249,19 +267,17 @@ unparsable
unrunnable
unsubscriptable
untokenizable
+usecache
username
-URL
-UTF
utf
vendored
versionadded
virtualenv
-whitespace
wikipedia
wildcard
wildcards
www
+xdist
xml
-XML
xrange
xyzzy
diff --git a/doc/excluding.rst b/doc/excluding.rst
index b89d449c5..e9d28f156 100644
--- a/doc/excluding.rst
+++ b/doc/excluding.rst
@@ -7,6 +7,8 @@
Excluding code from coverage.py
===============================
+.. highlight:: python
+
You may have code in your project that you know won't be executed, and you want
to tell coverage.py to ignore it. For example, you may have debugging-only
code that won't be executed during your unit tests. You can tell coverage.py to
@@ -71,19 +73,20 @@ If the matched line introduces a block, the entire block is excluded from
reporting. Matching a ``def`` line or decorator line will exclude an entire
function.
+.. highlight:: ini
+
For example, you might decide that __repr__ functions are usually only used in
debugging code, and are uninteresting to test themselves. You could exclude
all of them by adding a regex to the exclusion list::
[report]
- exclude_lines =
+ exclude_also =
def __repr__
For example, here's a list of exclusions I've used::
[report]
- exclude_lines =
- pragma: no cover
+ exclude_also =
def __repr__
if self.debug:
if settings.DEBUG
@@ -91,12 +94,14 @@ For example, here's a list of exclusions I've used::
raise NotImplementedError
if 0:
if __name__ == .__main__.:
+ if TYPE_CHECKING:
class .*\bProtocol\):
@(abc\.)?abstractmethod
-Note that when using the ``exclude_lines`` option in a configuration file, you
-are taking control of the entire list of regexes, so you need to re-specify the
-default "pragma: no cover" match if you still want it to apply.
+The :ref:`config_report_exclude_also` option adds regexes to the built-in
+default list so that you can add your own exclusions. The older
+:ref:`config_report_exclude_lines` option completely overwrites the list of
+regexes.
The regexes only have to match part of a line. Be careful not to over-match. A
value of ``...`` will match any line with more than three characters in it.
diff --git a/doc/faq.rst b/doc/faq.rst
index e60942833..d4f5a565e 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -11,6 +11,22 @@ FAQ and other help
Frequently asked questions
--------------------------
+Q: Why are some of my files not measured?
+.........................................
+
+Coverage.py has a number of mechanisms for deciding which files to measure and
+which to skip. If your files aren't being measured, use the ``--debug=trace``
+:ref:`option `, also settable as ``[run] debug=trace`` in the
+:ref:`settings file `, or as ``COVERAGE_DEBUG=trace`` in an
+environment variable.
+
+This will write a line for each file considered, indicating whether it is
+traced or not, and if not, why not. Be careful though: the output might be
+swallowed by your test runner. If so, a ``COVERAGE_DEBUG_FILE=/tmp/cov.out``
+environment variable can direct the output to a file instead to ensure you see
+everything.
+
+
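+For example, a minimal settings-file sketch of the same option:
+
+.. code-block:: ini
+
+    [run]
+    # Log each file considered, and why it was or wasn't traced.
+    debug = trace
+
+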
Q: Why do unexecutable lines show up as executed?
.................................................
@@ -23,11 +39,24 @@ If old data is persisting, you can use an explicit ``coverage erase`` command
to clean out the old data.
+Q: Why are my function definitions marked as run when I haven't tested them?
+............................................................................
+
+The ``def`` and ``class`` lines in your Python file are executed when the file
+is imported. Those are the lines that define your functions and classes. They
+run even if you never call the functions. It's the body of the functions that
+will be marked as not executed if you don't test them, not the ``def`` lines.
+
+This can mean that your code has a moderate coverage total even if no tests
+have been written or run. This might seem surprising, but it is accurate: the
+``def`` lines have actually been run.
+
+
Q: Why do the bodies of functions show as executed, but the def lines do not?
.............................................................................
-This happens because coverage.py is started after the functions are defined.
-The definition lines are executed without coverage measurement, then
+If this happens, it's because coverage.py is started after the functions are
+defined. The definition lines are executed without coverage measurement, then
coverage.py is started, then the function is called. This means the body is
measured, but the definition of the function itself is not.
@@ -54,7 +83,9 @@ Q: Can I find out which tests ran which lines?
..............................................
Yes! Coverage.py has a feature called :ref:`dynamic_contexts` which can collect
-this information. Add this to your .coveragerc file::
+this information. Add this to your .coveragerc file:
+
+.. code-block:: ini
[run]
dynamic_context = test_function
@@ -90,7 +121,7 @@ Make sure you are using the C trace function. Coverage.py provides two
implementations of the trace function. The C implementation runs much faster.
To see what you are running, use ``coverage debug sys``. The output contains
details of the environment, including a line that says either
-``CTrace: available`` or ``CTracer: unavailable``. If it says unavailable,
+``CTracer: available`` or ``CTracer: unavailable``. If it says unavailable,
then you are using the slow Python implementation.
Try re-installing coverage.py to see what happened and if you get the CTracer
@@ -115,9 +146,9 @@ __ https://nedbatchelder.com/blog/200710/flaws_in_coverage_measurement.html
.. _trialcoverage: https://pypi.org/project/trialcoverage/
- - `pytest-coverage`_
+ - `pytest-cov`_
- .. _pytest-coverage: https://pypi.org/project/pytest-coverage/
+ .. _pytest-cov: https://pypi.org/project/pytest-cov/
- `django-coverage`_ for use with Django.
@@ -127,10 +158,11 @@ __ https://nedbatchelder.com/blog/200710/flaws_in_coverage_measurement.html
Q: Where can I get more help with coverage.py?
..............................................
-You can discuss coverage.py or get help using it on the `Testing In Python`_
-mailing list.
+You can discuss coverage.py or get help using it on the `Python discussion
+forums`_. If you ping me (``@nedbat``), there's a higher chance I'll see the
+post.
-.. _Testing In Python: http://lists.idyll.org/listinfo/testing-in-python
+.. _Python discussion forums: https://discuss.python.org/
Bug reports are gladly accepted at the `GitHub issue tracker`_.
@@ -149,6 +181,6 @@ Coverage.py was originally written by `Gareth Rees`_.
Since 2004, `Ned Batchelder`_ has extended and maintained it with the help of
`many others`_. The :ref:`change history ` has all the details.
-.. _Gareth Rees: http://garethrees.org/
+.. _Gareth Rees: http://garethrees.org/
.. _Ned Batchelder: https://nedbatchelder.com
-.. _many others: https://github.com/nedbat/coveragepy/blob/master/CONTRIBUTORS.txt
+.. _many others: https://github.com/nedbat/coveragepy/blob/master/CONTRIBUTORS.txt
diff --git a/doc/index.rst b/doc/index.rst
index fd2b2b8c2..2475eb402 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -18,18 +18,17 @@ supported on:
.. PYVERSIONS
-* Python versions 3.7 through 3.11.0b4.
-
-* PyPy3 7.3.8.
+* Python versions 3.7 through 3.12.0b1.
+* PyPy3 7.3.11.
.. ifconfig:: prerelease
**This is a pre-release build. The usual warnings about possible bugs
- apply.** The latest stable version is coverage.py 6.4, `described here`_.
-
+ apply.** The latest stable version is coverage.py 6.5.0, `described here`_.
.. _described here: http://coverage.readthedocs.io/
+
For Enterprise
--------------
@@ -57,20 +56,24 @@ Getting started is easy:
#. Install coverage.py::
- $ pip install coverage
+ $ python3 -m pip install coverage
For more details, see :ref:`install`.
#. Use ``coverage run`` to run your test suite and gather data. However you
- normally run your test suite, you can run your test runner under coverage.
- If your test runner command starts with "python", just replace the initial
- "python" with "coverage run".
+ normally run your test suite, you can use your test runner under coverage.
+
+ .. tip::
+ If your test runner command starts with "python", just replace the initial
+ "python" with "coverage run".
+
+ ``python something.py`` becomes ``coverage run something.py``
- Instructions for specific test runners:
+ ``python -m amodule`` becomes ``coverage run -m amodule``
- .. tabs::
+ Other instructions for specific test runners:
- .. tab:: pytest
+ - **pytest**
If you usually use::
@@ -83,7 +86,7 @@ Getting started is easy:
Many people choose to use the `pytest-cov`_ plugin, but for most
purposes, it is unnecessary.
- .. tab:: unittest
+ - **unittest**
Change "python" to "coverage run", so this::
@@ -93,18 +96,18 @@ Getting started is easy:
$ coverage run -m unittest discover
- .. tab:: nosetest
-
- *Nose has been unmaintained for a long time. You should seriously
- consider adopting a different test runner.*
-
- Change this::
-
- $ nosetests arg1 arg2
-
- to::
-
- $ coverage run -m nose arg1 arg2
+ .. - **nosetest**
+ ..
+ .. *Nose has been unmaintained for a long time. You should seriously
+ .. consider adopting a different test runner.*
+ ..
+ .. Change this::
+ ..
+ .. $ nosetests arg1 arg2
+ ..
+ .. to this::
+ ..
+ .. $ coverage run -m nose arg1 arg2
To limit coverage measurement to code in the current directory, and also
find files that weren't executed at all, add the ``--source=.`` argument to
@@ -185,9 +188,10 @@ Getting help
------------
If the :ref:`FAQ ` doesn't answer your question, you can discuss
-coverage.py or get help using it on the `Testing In Python`_ mailing list.
+coverage.py or get help using it on the `Python discussion forums`_. If you
+ping me (``@nedbat``), there's a higher chance I'll see the post.
-.. _Testing In Python: http://lists.idyll.org/listinfo/testing-in-python
+.. _Python discussion forums: https://discuss.python.org/
Bug reports are gladly accepted at the `GitHub issue tracker`_.
GitHub also hosts the `code repository`_.
@@ -203,7 +207,10 @@ using coverage.py.
.. _I can be reached: https://nedbatchelder.com/site/aboutned.html
+For news and other chatter, follow the project on Mastodon:
+@coveragepy@hachyderm.io.
More information
----------------
@@ -227,4 +234,5 @@ More information
trouble
faq
Change history
+ migrating
sleepy
diff --git a/doc/install.rst b/doc/install.rst
index f3e015e46..1b940b4bb 100644
--- a/doc/install.rst
+++ b/doc/install.rst
@@ -15,19 +15,19 @@ Installation
You can install coverage.py in the usual ways. The simplest way is with pip::
- $ pip install coverage
+ $ python3 -m pip install coverage
.. ifconfig:: prerelease
To install a pre-release version, you will need to specify ``--pre``::
- $ pip install --pre coverage
+ $ python3 -m pip install --pre coverage
or the exact version you want to install:
.. parsed-literal::
- $ pip install |coverage-equals-release|
+ $ python3 -m pip install |coverage-equals-release|
.. _install_extension:
diff --git a/doc/migrating.rst b/doc/migrating.rst
new file mode 100644
index 000000000..443afac63
--- /dev/null
+++ b/doc/migrating.rst
@@ -0,0 +1,54 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+.. _migrating:
+
+==========================
+Migrating between versions
+==========================
+
+New versions of coverage.py or Python might require you to adjust your
+settings, options, or other aspects of how you use coverage.py. This page
+details those changes.
+
+.. _migrating_cov7:
+
+Migrating to coverage.py 7.x
+----------------------------
+
+Consider these changes when migrating to coverage.py 7.x:
+
+- The way wildcards in file path patterns work has changed in certain cases in
+  7.x:
+
+  - Previously, ``*`` would incorrectly match directory separators, making
+    precise matching difficult. Patterns such as ``*tests/*`` will need to be
+    changed to ``*/tests/*`` (see the example after this list).
+
+  - ``**`` now matches any number of nested directories. If you wish to retain
+    the behavior of ``**/tests/*`` in previous versions, then ``*/**/tests/*``
+    can be used instead.
+
+- When remapping file paths with ``[paths]``, a path will be remapped only if
+  the resulting path exists. Ensure that remapped ``[paths]`` entries exist
+  when upgrading, as this is now enforced.
+
+- The :ref:`config_report_exclude_also` setting is new in 7.2.0. It adds
+ exclusion regexes while keeping the default built-in set. It's better than
+ the older :ref:`config_report_exclude_lines` setting, which overwrote the
+ entire list. Newer versions of coverage.py will be adding to the default set
+ of exclusions. Using ``exclude_also`` will let you benefit from those
+ updates.
+
+
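+As an example of the wildcard change above (the ``omit`` value is only an
+illustration), a pre-7.x pattern written as ``*tests/*`` is now spelled:
+
+.. code-block:: ini
+
+    [run]
+    # The leading */ is required in 7.x because * no longer matches
+    # directory separators.
+    omit =
+        */tests/*
+
+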
+.. _migrating_py312:
+
+Migrating to Python 3.12
+------------------------
+
+Keep these things in mind when running under Python 3.12:
+
+- Python 3.12 now inlines list, dict, and set comprehensions. Previously, they
+ were compiled as functions that were called internally. Coverage.py would
+ warn you if comprehensions weren't fully completed, but this no longer
+ happens with Python 3.12.
diff --git a/doc/plugins.rst b/doc/plugins.rst
index fae4f73be..a289ba7e6 100644
--- a/doc/plugins.rst
+++ b/doc/plugins.rst
@@ -25,21 +25,27 @@ To use a coverage.py plug-in, you install it and configure it. For this
example, let's say there's a Python package called ``something`` that provides
a coverage.py plug-in called ``something.plugin``.
-#. Install the plug-in's package as you would any other Python package::
+#. Install the plug-in's package as you would any other Python package:
- pip install something
+ .. code-block:: sh
+
+ $ python3 -m pip install something
#. Configure coverage.py to use the plug-in. You do this by editing (or
creating) your .coveragerc file, as described in :ref:`config`. The
``plugins`` setting indicates your plug-in. It's a list of importable
- module names of plug-ins::
+ module names of plug-ins:
+
+ .. code-block:: ini
[run]
plugins =
something.plugin
#. If the plug-in needs its own configuration, you can add those settings in
- the .coveragerc file in a section named for the plug-in::
+ the .coveragerc file in a section named for the plug-in:
+
+ .. code-block:: ini
[something.plugin]
option1 = True
diff --git a/doc/python-coverage.1.txt b/doc/python-coverage.1.txt
index 47d447304..9d38f4f73 100644
--- a/doc/python-coverage.1.txt
+++ b/doc/python-coverage.1.txt
@@ -8,7 +8,7 @@ Measure Python code coverage
:Author: Ned Batchelder
:Author: |author|
-:Date: 2022-01-25
+:Date: 2022-12-03
:Copyright: Apache 2.0 license, attribution and disclaimer required.
:Manual section: 1
:Manual group: Coverage.py
@@ -299,6 +299,9 @@ COMMAND REFERENCE
\--fail-under `MIN`
Exit with a status of 2 if the total coverage is less than `MIN`.
+ \--format `FORMAT`
+ Output format, either text (default), markdown, or total.
+
\-i, --ignore-errors
Ignore errors while reading source files.
diff --git a/doc/requirements.in b/doc/requirements.in
index 3a9088c72..42eca4052 100644
--- a/doc/requirements.in
+++ b/doc/requirements.in
@@ -7,11 +7,12 @@
-c ../requirements/pins.pip
cogapp
-doc8
+#doc8
pyenchant
+scriv # for writing GitHub releases
sphinx
sphinx-autobuild
sphinx_rtd_theme
-sphinx-tabs
+#sphinx-tabs
sphinxcontrib-restbuilder
sphinxcontrib-spelling
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 46678fca8..a1894b64f 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -1,84 +1,82 @@
#
-# This file is autogenerated by pip-compile with python 3.7
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.7
+# by the following command:
#
# make upgrade
#
-alabaster==0.7.12
+alabaster==0.7.13
# via sphinx
-babel==2.10.1
+attrs==23.1.0
+ # via scriv
+babel==2.12.1
# via sphinx
-certifi==2022.5.18.1
+certifi==2023.5.7
# via requests
-charset-normalizer==2.0.12
+charset-normalizer==3.1.0
# via requests
+click==8.1.3
+ # via
+ # click-log
+ # scriv
+click-log==0.4.0
+ # via scriv
cogapp==3.3.0
# via -r doc/requirements.in
-colorama==0.4.4
+colorama==0.4.6
# via sphinx-autobuild
-doc8==0.11.2
- # via -r doc/requirements.in
-docutils==0.17.1
+docutils==0.18.1
# via
- # -c doc/../requirements/pins.pip
- # doc8
- # restructuredtext-lint
# sphinx
# sphinx-rtd-theme
- # sphinx-tabs
-idna==3.3
+idna==3.4
# via requests
-imagesize==1.3.0
+imagesize==1.4.1
# via sphinx
-importlib-metadata==4.11.4
+importlib-metadata==6.6.0
# via
+ # attrs
+ # click
# sphinx
# sphinxcontrib-spelling
- # stevedore
jinja2==3.1.2
- # via sphinx
+ # via
+ # scriv
+ # sphinx
livereload==2.6.3
# via sphinx-autobuild
-markupsafe==2.1.1
+markupsafe==2.1.2
# via jinja2
-packaging==21.3
+packaging==23.1
# via sphinx
-pbr==5.9.0
- # via stevedore
pyenchant==3.2.2
# via
# -r doc/requirements.in
# sphinxcontrib-spelling
-pygments==2.12.0
+pygments==2.15.1
+ # via sphinx
+pytz==2023.3
+ # via babel
+requests==2.31.0
# via
- # doc8
+ # scriv
# sphinx
- # sphinx-tabs
-pyparsing==3.0.9
- # via packaging
-pytz==2022.1
- # via babel
-requests==2.27.1
- # via sphinx
-restructuredtext-lint==1.4.0
- # via doc8
+scriv==1.3.1
+ # via -r doc/requirements.in
six==1.16.0
# via livereload
snowballstemmer==2.2.0
# via sphinx
-sphinx==4.5.0
+sphinx==5.3.0
# via
# -r doc/requirements.in
# sphinx-autobuild
# sphinx-rtd-theme
- # sphinx-tabs
+ # sphinxcontrib-jquery
# sphinxcontrib-restbuilder
# sphinxcontrib-spelling
sphinx-autobuild==2021.3.14
# via -r doc/requirements.in
-sphinx-rtd-theme==1.0.0
- # via -r doc/requirements.in
-sphinx-tabs==3.3.1
+sphinx-rtd-theme==1.2.1
# via -r doc/requirements.in
sphinxcontrib-applehelp==1.0.2
# via sphinx
@@ -86,6 +84,8 @@ sphinxcontrib-devhelp==1.0.2
# via sphinx
sphinxcontrib-htmlhelp==2.0.0
# via sphinx
+sphinxcontrib-jquery==4.1
+ # via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==1.0.3
@@ -94,15 +94,13 @@ sphinxcontrib-restbuilder==0.3
# via -r doc/requirements.in
sphinxcontrib-serializinghtml==1.1.5
# via sphinx
-sphinxcontrib-spelling==7.5.0
+sphinxcontrib-spelling==8.0.0
# via -r doc/requirements.in
-stevedore==3.5.0
- # via doc8
-tornado==6.1
+tornado==6.2
# via livereload
-typing-extensions==4.2.0
+typing-extensions==4.6.2
# via importlib-metadata
-urllib3==1.26.9
+urllib3==2.0.2
# via requests
-zipp==3.8.0
+zipp==3.15.0
# via importlib-metadata
diff --git a/doc/sample_html/coverage_html.js b/doc/sample_html/coverage_html.js
index 084a4970c..4c321182c 100644
--- a/doc/sample_html/coverage_html.js
+++ b/doc/sample_html/coverage_html.js
@@ -166,7 +166,7 @@ coverage.wire_up_filter = function () {
// Trigger change event on setup, to force filter on page refresh
// (filter value may still be present).
- document.getElementById("filter").dispatchEvent(new Event("change"));
+ document.getElementById("filter").dispatchEvent(new Event("input"));
};
coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
@@ -214,7 +214,7 @@ coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";
coverage.pyfile_ready = function () {
// If we're directed to a particular line number, highlight the line.
var frag = location.hash;
- if (frag.length > 2 && frag[1] === 't') {
+ if (frag.length > 2 && frag[1] === "t") {
document.querySelector(frag).closest(".n").classList.add("highlight");
coverage.set_sel(parseInt(frag.substr(2), 10));
} else {
@@ -257,6 +257,10 @@ coverage.pyfile_ready = function () {
coverage.init_scroll_markers();
coverage.wire_up_sticky_header();
+ document.querySelectorAll("[id^=ctxs]").forEach(
+ cbox => cbox.addEventListener("click", coverage.expand_contexts)
+ );
+
// Rebuild scroll markers when the window height changes.
window.addEventListener("resize", coverage.build_scroll_markers);
};
@@ -528,14 +532,14 @@ coverage.scroll_window = function (to_pos) {
coverage.init_scroll_markers = function () {
// Init some variables
- coverage.lines_len = document.querySelectorAll('#source > p').length;
+ coverage.lines_len = document.querySelectorAll("#source > p").length;
// Build html
coverage.build_scroll_markers();
};
coverage.build_scroll_markers = function () {
- const temp_scroll_marker = document.getElementById('scroll_marker')
+ const temp_scroll_marker = document.getElementById("scroll_marker")
if (temp_scroll_marker) temp_scroll_marker.remove();
// Don't build markers if the window has no scroll bar.
if (document.body.scrollHeight <= window.innerHeight) {
@@ -549,11 +553,11 @@ coverage.build_scroll_markers = function () {
const scroll_marker = document.createElement("div");
scroll_marker.id = "scroll_marker";
- document.getElementById('source').querySelectorAll(
- 'p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par'
+ document.getElementById("source").querySelectorAll(
+ "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par"
).forEach(element => {
const line_top = Math.floor(element.offsetTop * marker_scale);
- const line_number = parseInt(element.id.substr(1));
+        const line_number = parseInt(element.querySelector(".n a").id.substr(1), 10);
if (line_number === previous_line + 1) {
// If this solid missed block just make previous mark higher.
@@ -577,24 +581,40 @@ coverage.build_scroll_markers = function () {
};
coverage.wire_up_sticky_header = function () {
- const header = document.querySelector('header');
+ const header = document.querySelector("header");
const header_bottom = (
- header.querySelector('.content h2').getBoundingClientRect().top -
+ header.querySelector(".content h2").getBoundingClientRect().top -
header.getBoundingClientRect().top
);
function updateHeader() {
if (window.scrollY > header_bottom) {
- header.classList.add('sticky');
+ header.classList.add("sticky");
} else {
- header.classList.remove('sticky');
+ header.classList.remove("sticky");
}
}
- window.addEventListener('scroll', updateHeader);
+ window.addEventListener("scroll", updateHeader);
updateHeader();
};
+coverage.expand_contexts = function (e) {
+ var ctxs = e.target.parentNode.querySelector(".ctxs");
+
+ if (!ctxs.classList.contains("expanded")) {
+ var ctxs_text = ctxs.textContent;
+ var width = Number(ctxs_text[0]);
+ ctxs.textContent = "";
+ for (var i = 1; i < ctxs_text.length; i += width) {
+            const key = ctxs_text.substring(i, i + width).trim();
+ ctxs.appendChild(document.createTextNode(contexts[key]));
+ ctxs.appendChild(document.createElement("br"));
+ }
+ ctxs.classList.add("expanded");
+ }
+};
+
document.addEventListener("DOMContentLoaded", () => {
if (document.body.classList.contains("indexfile")) {
coverage.index_ready();
diff --git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
index 70cd698cc..c5ac367ec 100644
--- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
+++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
@@ -55,8 +55,8 @@
- 2 statements
-
+ 1 statements
+
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v6.4.2,
- created at 2022-07-12 09:07 -0400
+ coverage.py v7.2.7,
+ created at 2023-05-29 15:26 -0400