pax_global_header00006660000000000000000000000064150512233330014507gustar00rootroot0000000000000052 comment=fac830885b9e9a1ca26a374d63036a9f25cc2066 Erotemic-xdoctest-fac8308/000077500000000000000000000000001505122333300155275ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/.circleci/000077500000000000000000000000001505122333300173625ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/.circleci/config.yml000066400000000000000000000260261505122333300213600ustar00rootroot00000000000000# Python CircleCI 2.0 configuration file # Check https://circleci.com/docs/2.0/language-python/ for more details # # References: # # how to setup multiple python versions # https://stackoverflow.com/questions/948354/default-behavior-of-git-push-without-a-branch-specified # https://github.com/adambrenecki/virtualfish/blob/aa3d6271bcb86ad27b6d24f96b5bd386d176f588/.circleci/config.yml # # # Multiple files for a checksum # https://discuss.circleci.com/t/cant-checksum-multiple-files-with-slashes-in-the-file-path/20667/2 # # # Auto Cancel Redundant Builds # https://circleci.com/docs/2.0/skip-build/#steps-to-enable-auto-cancel-for-pipelines-triggered-by-pushes-to-github-or-the-api # https://app.circleci.com/settings/project/github/Erotemic/xdoctest/advanced?return-to=https%3A%2F%2Fapp.circleci.com%2Fpipelines%2Fgithub%2FErotemic%2Fxdoctest # Abuse YAML notation to make a heredoc. This will be ignored by the CI. __doc__: &__doc__ - | __doc__=""" Main CI has moved to github actions """ # " # hack for vim yml syntax highlighter version: 2 workflows: version: 2 test: jobs: - test_full/cp3_10-3_10-linux: filters: &__ignore_release__ # this yaml anchor to ignore tests on the release branch branches: ignore: - release - test_full/cp39-39-linux: filters: <<: *__ignore_release__ - test_full/cp38-38-linux: filters: <<: *__ignore_release__ - test_minimal/cp3_10-3_10-linux: filters: <<: *__ignore_release__ - test_minimal/cp39-39-linux: filters: <<: *__ignore_release__ - test_minimal/cp38-38-linux: filters: <<: *__ignore_release__ - test_minimal/pypy3: filters: <<: *__ignore_release__ - test_full/pypy3: filters: <<: *__ignore_release__ jobs: ########### # TEMPLATES ########### .common_template: &common_template environment: # Setting the python executable environ allows template reuse for pypy - PYTHON_EXE: python - PIP_DOWNLOAD_CACHE=./pipcache docker: - image: cimg/python # We shouldnt need a lot of resources to run resource_class: small steps: - checkout #working_directory: ~/{{ .Environment.CIRCLE_JOB }} .test_minimal_template: &test_minimal_template <<: - *common_template steps: - checkout ### INITIALIZE AND CACHE REQUIREMENTS ### - restore_cache: keys: - v5-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }}-{{ .Environment.CIRCLE_JOB }} - run: name: install dependencies command: | pwd ls -al $PYTHON_EXE -m venv venv || virtualenv -v venv # first command is python3 || second is python2 . venv/bin/activate # The "minimal" tests install barebones requirements pip install pip -U pip install -r requirements/tests.txt pip install -r requirements/runtime.txt pip install . - save_cache: paths: - ./pipcache key: v5-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }}-{{ .Environment.CIRCLE_JOB }} # ### RUN TESTS ### - run: name: run tests command: | . venv/bin/activate ls $PYTHON_EXE -m pip freeze $PYTHON_EXE run_tests.py # Upload to codecov.io (requires a CODECOV_TOKEN environ or github+circleci integration) #codecov #- store_artifacts: # path: test-reports # destination: test-reports #- store_artifacts: # path: .coverage # destination: .coverage .test_full_template: &test_full_template <<: - *common_template steps: - checkout ### INITIALIZE AND CACHE REQUIREMENTS ### - restore_cache: keys: - v5-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/optional.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }}-{{ checksum "requirements/colors.txt" }}-{{ .Environment.CIRCLE_JOB }} - run: name: install dependencies command: | pwd ls -al $PYTHON_EXE -m venv venv || virtualenv -v venv # first command is python3 || second is python2 . venv/bin/activate # The "full" dependency install pip install pip -U pip install scikit-build pip install -r requirements.txt pip install . - save_cache: paths: #- ./venv - ./pipcache key: v5-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/optional.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }}-{{ checksum "requirements/colors.txt" }}-{{ .Environment.CIRCLE_JOB }} # ### RUN TESTS ### - run: name: run tests command: | . venv/bin/activate ls $PYTHON_EXE -m pip freeze $PYTHON_EXE run_tests.py # Upload to codecov.io (requires a CODECOV_TOKEN environ or github+circleci integration) #codecov #- store_artifacts: # path: test-reports # destination: test-reports #- store_artifacts: # path: .coverage # destination: .coverage ################################### ### INHERIT FROM BASE TEMPLATES ### ################################### # Define tests fo the other python verisons using the "test3.6" template # and indicating what needs to be modified. # # All we need to do is change the base docker image so python is the # version we want we can reuse everything else from the template # test_full/cp3_10-3_10-linux: <<: *test_full_template docker: - image: cimg/python:3.10.0 test_full/cp39-39-linux: <<: *test_full_template docker: - image: cimg/python:3.9 test_full/cp38-38-linux: <<: *test_full_template docker: - image: cimg/python:3.8 # ------------ test_minimal/cp3_10-3_10-linux: <<: *test_minimal_template docker: - image: cimg/python:3.10 test_minimal/cp39-39-linux: <<: *test_minimal_template docker: - image: cimg/python:3.9 test_minimal/cp38-38-linux: <<: *test_minimal_template docker: - image: cimg/python:3.8 # --- pypy ---- test_minimal/pypy3: <<: *test_minimal_template docker: - image: pypy:3 environment: - PYTHON_EXE: pypy3 test_full/pypy3: <<: *test_full_template docker: - image: pypy:3 environment: - PYTHON_EXE: pypy3 .__doc__: &__doc__ - | IMAGE_NAME=cimg/python:3.9 docker pull $IMAGE_NAME IMAGE_NAME=pypy:3 docker pull $IMAGE_NAME docker run -v $HOME/code/xdoctest:/io -it $IMAGE_NAME bash IMAGE_NAME=cimg/python:3.10 docker pull $IMAGE_NAME docker run -v $HOME/code/xdoctest:/io -it pypy:3 bash IMAGE_NAME=cimg/python:3.10 docker run -v $HOME/code/xdoctest:/io -it $IMAGE_NAME bash IMAGE_NAME=circleci/python:3.4 docker run -v $HOME/code/xdoctest:/io -it $IMAGE_NAME bash git clone /io $HOME/repo cd $HOME/repo source dev/make_strict_req.sh pip install -r requirements-strict.txt --user PATH=/home/circleci/.local/bin:$PAT:$PATH pip install -e . --user python -m pytest python -m pip wheel --no-deps --wheel-dir wheelhouse /io WHEEL_FPATH=$(ls wheelhouse/* -t| head -n1) echo $WHEEL_FPATH python -m pip wheel --wheel-dir wheelhouse /io cd /io # Logic to print out the commands to reproduce CI steps source $HOME/local/init/utils.sh pyblock " import yaml import ubelt as ub data = yaml.safe_load(open(ub.expandpath('$HOME/code/xdoctest/.circleci/config.yml'))) JOB_NAME = 'test_minimal/pypy3' job = data['jobs'][JOB_NAME] IMAGE_NAME = job['docker'][0]['image'] print('IMAGE_NAME={}'.format(IMAGE_NAME)) print('docker run -v $HOME/code/xdoctest:/io -it {} bash'.format(IMAGE_NAME)) print(ub.codeblock( ''' ### ### # Clone the mounted repo for a fresh start mkdir -p $HOME/code git clone /io /root/{JOB_NAME} cd /root/{JOB_NAME} ''').format(JOB_NAME=JOB_NAME)) for kv in job['environment']: for k, v in kv.items(): print('{}={}'.format(k, v)) for step in job['steps']: if 'run' in step: print(step['run']['command']) " IMAGE_NAME=pypy:3 docker run -v $HOME/code/xdoctest:/io -it pypy:3 bash ### ### # Clone the mounted repo for a fresh start mkdir -p /home/joncrall/code git clone /io /root/test_minimal/pypy3 cd /root/test_minimal/pypy3 PYTHON_EXE=pypy3 $PYTHON_EXE -m venv venv || virtualenv -v venv # first command is python3 || second is python2 . venv/bin/activate # The "minimal" tests install barebones requirements pip install pip -U pip install -r requirements/tests.txt pip install -r requirements/runtime.txt pip install . . venv/bin/activate python -m pytest --cov=xdoctest --cov-config .coveragerc --cov-report term -s # pip install pytest-cov==2.8.1 # hack to avoid regression #python run_tests.py # TO RUN A JOB ON YOUR LOCAL MACHINE # INSTALL CIRCLE CI curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | DESTDIR=$HOME/.local/bin bash JOB_NAME=test_minimal/pypy3 circleci local execute --job $JOB_NAME JOB_NAME=test_full/pypy3 circleci local execute --job $JOB_NAME # Run circleci scripts on a local machine mkdir -p $HOME/Downloads curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | DESTDIR=$HOME/Downloads bash $HOME/.local/bin/circleci update $HOME/.local/bin/circleci switch $HOME/.local/bin/circleci config validate $HOME/.local/bin/circleci local execute --job test_minimal/3.9 $HOME/.local/bin/circleci local execute --job test_minimal/pypy3 $HOME/.local/bin/circleci local execute --job test_full/3.6 $HOME/.local/bin/circleci local execute --config .circleci/config.yml --job test_full/cp38-38-linux $HOME/.local/bin/circleci local execute --config .circleci/config.yml Erotemic-xdoctest-fac8308/.codecov.yml000066400000000000000000000006131505122333300177520ustar00rootroot00000000000000codecov: notify: require_ci_to_pass: no coverage: precision: 2 round: down range: "70...100" status: project: default: threshold: 10% patch: yes changes: no parsers: gcov: branch_detection: conditional: yes loop: yes method: no macro: no comment: layout: "header, diff" behavior: default require_changes: no Erotemic-xdoctest-fac8308/.coveragerc000066400000000000000000000011741505122333300176530ustar00rootroot00000000000000[run] branch = True source = xdoctest [report] exclude_lines = pragma: no cover .* # pragma: no cover .* # nocover def __repr__ raise AssertionError raise NotImplementedError if 0: if six.PY2: if trace is not None verbose = .* raise pass if _debug: if __name__ == .__main__.: print(.*) ^import .* ^from .* import .* omit = # pytest imports this before we can cover it # How can this help us?: # http://pytest-cov.readthedocs.io/en/latest/plugins.html xdoctest/plugin.py xdoctest/__main__.py xdoctest/__init__.py xdoctest/exceptions.py Erotemic-xdoctest-fac8308/.gitattributes000066400000000000000000000001301505122333300204140ustar00rootroot00000000000000*.py text eol=lf *.md text eol=lf *.ini text eol=lf *.txt text eol=lf *.yml text eol=lf Erotemic-xdoctest-fac8308/.github/000077500000000000000000000000001505122333300170675ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/.github/dependabot.yml000066400000000000000000000002671505122333300217240ustar00rootroot00000000000000version: 2 updates: # Maintain dependencies for GitHub Actions - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" day: "friday" Erotemic-xdoctest-fac8308/.github/workflows/000077500000000000000000000000001505122333300211245ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/.github/workflows/tests.yml000066400000000000000000000536471505122333300230300ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions # Based on ~/code/xcookie/xcookie/rc/tests.yml.in # Now based on ~/code/xcookie/xcookie/builders/github_actions.py # See: https://github.com/Erotemic/xcookie name: PurePyCI on: push: pull_request: branches: [ main ] jobs: lint_job: ## # Run quick linting and typing checks. # To disable all linting add "linter=false" to the xcookie config. # To disable type checks add "notypes" to the xcookie tags. ## runs-on: ubuntu-latest steps: - name: Checkout source uses: actions/checkout@v4.2.2 - name: Set up Python 3.13 for linting uses: actions/setup-python@v5.6.0 with: python-version: '3.13' - name: Install dependencies run: |- python -m pip install pip uv -U python -m uv pip install flake8 - name: Lint with flake8 run: |- # stop the build if there are Python syntax errors or undefined names flake8 ./src/xdoctest --count --select=E9,F63,F7,F82 --show-source --statistics - name: Typecheck with mypy run: |- python -m pip install mypy pip install -r requirements/runtime.txt mypy --install-types --non-interactive ./src/xdoctest mypy ./src/xdoctest build_and_test_sdist: ## # Build the pure python package from source and test it in the # same environment. ## name: Build sdist runs-on: ubuntu-latest steps: - name: Checkout source uses: actions/checkout@v4.2.2 - name: Set up Python 3.13 uses: actions/setup-python@v5.6.0 with: python-version: '3.13' - name: Upgrade pip run: |- python -m pip install pip uv -U python -m uv pip install -r requirements/tests.txt python -m uv pip install -r requirements/runtime.txt - name: Build sdist shell: bash run: |- python -m pip install pip uv -U python -m uv pip install setuptools>=0.8 wheel build twine python -m build --sdist --outdir wheelhouse python -m twine check ./wheelhouse/xdoctest*.tar.gz - name: Install sdist run: |- ls -al wheelhouse python -m uv pip install wheelhouse/xdoctest*.tar.gz -v - name: Test minimal loose sdist env: {} run: |- pwd ls -al # Run in a sandboxed directory WORKSPACE_DNAME="testsrcdir_minimal_${CI_PYTHON_VERSION}_${GITHUB_RUN_ID}_${RUNNER_OS}" mkdir -p $WORKSPACE_DNAME cd $WORKSPACE_DNAME # Run the tests # Get path to installed package MOD_DPATH=$(python -c "import xdoctest, os; print(os.path.dirname(xdoctest.__file__))") echo "MOD_DPATH = $MOD_DPATH" python -m pytest --verbose --cov=xdoctest $MOD_DPATH ../tests cd .. - name: Test full loose sdist env: {} run: |- pwd ls -al true # Run in a sandboxed directory WORKSPACE_DNAME="testsrcdir_full_${CI_PYTHON_VERSION}_${GITHUB_RUN_ID}_${RUNNER_OS}" mkdir -p $WORKSPACE_DNAME cd $WORKSPACE_DNAME # Run the tests # Get path to installed package MOD_DPATH=$(python -c "import xdoctest, os; print(os.path.dirname(xdoctest.__file__))") echo "MOD_DPATH = $MOD_DPATH" python -m pytest --verbose --cov=xdoctest $MOD_DPATH ../tests cd .. - uses: actions/upload-artifact@v4.4.0 name: Upload sdist artifact with: name: sdist_wheels path: ./wheelhouse/xdoctest*.tar.gz build_purepy_wheels: ## # Download and test the pure-python wheels that were build in the # build_purepy_wheels and test them in this independent environment. ## name: ${{ matrix.python-version }} on ${{ matrix.os }}, arch=${{ matrix.arch }} with ${{ matrix.install-extras }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: - ubuntu-latest python-version: - '3.13' arch: - auto steps: - name: Checkout source uses: actions/checkout@v4.2.2 - name: Set up QEMU uses: docker/setup-qemu-action@v3.0.0 if: runner.os == 'Linux' && matrix.arch != 'auto' with: platforms: all - name: Setup Python uses: actions/setup-python@v5.6.0 with: python-version: ${{ matrix.python-version }} - name: Build pure wheel shell: bash run: |- python -m pip install pip uv -U python -m uv pip install setuptools>=0.8 wheel build twine python -m build --wheel --outdir wheelhouse python -m twine check ./wheelhouse/xdoctest*.whl - name: Show built files shell: bash run: ls -la wheelhouse - uses: actions/upload-artifact@v4.4.0 name: Upload wheels artifact with: name: wheels-${{ matrix.os }}-${{ matrix.arch }} path: ./wheelhouse/xdoctest*.whl test_purepy_wheels: name: ${{ matrix.python-version }} on ${{ matrix.os }}, arch=${{ matrix.arch }} with ${{ matrix.install-extras }} if: "! startsWith(github.event.ref, 'refs/heads/release')" runs-on: ${{ matrix.os }} needs: - build_purepy_wheels strategy: fail-fast: false matrix: # Xcookie generates an explicit list of environments that will be used # for testing instead of using the more concise matrix notation. include: - python-version: '3.8' install-extras: tests-strict,runtime-strict os: ubuntu-latest arch: auto - python-version: '3.8' install-extras: tests-strict,runtime-strict os: macOS-latest arch: auto - python-version: '3.8' install-extras: tests-strict,runtime-strict os: windows-latest arch: auto - python-version: '3.13' install-extras: tests-strict,runtime-strict,optional-strict os: ubuntu-latest arch: auto - python-version: '3.13' install-extras: tests-strict,runtime-strict,optional-strict os: macOS-latest arch: auto - python-version: '3.13' install-extras: tests-strict,runtime-strict,optional-strict os: windows-latest arch: auto - python-version: '3.13' install-extras: tests os: macOS-latest arch: auto - python-version: '3.13' install-extras: tests os: windows-latest arch: auto - python-version: '3.8' install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: '3.9' install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: '3.10' install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: '3.11' install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: '3.12' install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: '3.13' install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: pypy-3.9 install-extras: tests,optional os: ubuntu-latest arch: auto - python-version: '3.8' install-extras: tests,optional os: macOS-latest arch: auto - python-version: '3.9' install-extras: tests,optional os: macOS-latest arch: auto - python-version: '3.10' install-extras: tests,optional os: macOS-latest arch: auto - python-version: '3.11' install-extras: tests,optional os: macOS-latest arch: auto - python-version: '3.12' install-extras: tests,optional os: macOS-latest arch: auto - python-version: '3.13' install-extras: tests,optional os: macOS-latest arch: auto - python-version: pypy-3.9 install-extras: tests,optional os: macOS-latest arch: auto - python-version: '3.8' install-extras: tests,optional os: windows-latest arch: auto - python-version: '3.9' install-extras: tests,optional os: windows-latest arch: auto - python-version: '3.10' install-extras: tests,optional os: windows-latest arch: auto - python-version: '3.11' install-extras: tests,optional os: windows-latest arch: auto - python-version: '3.12' install-extras: tests,optional os: windows-latest arch: auto - python-version: '3.13' install-extras: tests,optional os: windows-latest arch: auto - python-version: pypy-3.9 install-extras: tests,optional os: windows-latest arch: auto steps: - name: Checkout source uses: actions/checkout@v4.2.2 - name: Enable MSVC 64bit uses: ilammy/msvc-dev-cmd@v1 if: matrix.os == 'windows-latest' - name: Set up QEMU uses: docker/setup-qemu-action@v3.0.0 if: runner.os == 'Linux' && matrix.arch != 'auto' with: platforms: all - name: Setup Python uses: actions/setup-python@v5.6.0 with: python-version: ${{ matrix.python-version }} - uses: actions/download-artifact@v4.1.8 name: Download wheels with: pattern: wheels-* merge-multiple: true path: wheelhouse - name: Install wheel ${{ matrix.install-extras }} shell: bash env: INSTALL_EXTRAS: ${{ matrix.install-extras }} run: |- echo "Finding the path to the wheel" ls wheelhouse || echo "wheelhouse does not exist" echo "Installing helpers: update pip" python -m pip install pip uv -U echo "Installing helpers: setuptools" python -m uv pip install setuptools>=0.8 setuptools_scm wheel build -U echo "Installing helpers: tomli and pkginfo" python -m uv pip install tomli pkginfo export WHEEL_FPATH=$(python -c "if 1: import pathlib dist_dpath = pathlib.Path('wheelhouse') candidates = list(dist_dpath.glob('xdoctest*.whl')) candidates += list(dist_dpath.glob('xdoctest*.tar.gz')) fpath = sorted(candidates)[-1] print(str(fpath).replace(chr(92), chr(47))) ") export MOD_VERSION=$(python -c "if 1: from pkginfo import Wheel, SDist import pathlib fpath = '$WHEEL_FPATH' cls = Wheel if fpath.endswith('.whl') else SDist item = cls(fpath) print(item.version) ") echo "WHEEL_FPATH=$WHEEL_FPATH" echo "INSTALL_EXTRAS=$INSTALL_EXTRAS" echo "UV_RESOLUTION=$UV_RESOLUTION" echo "MOD_VERSION=$MOD_VERSION" python -m uv pip install --prerelease=allow "xdoctest[$INSTALL_EXTRAS]==$MOD_VERSION" -f wheelhouse echo "Install finished." - name: Test wheel ${{ matrix.install-extras }} shell: bash env: CI_PYTHON_VERSION: py${{ matrix.python-version }} run: |- echo "Creating test sandbox directory" export WORKSPACE_DNAME="testdir_${CI_PYTHON_VERSION}_${GITHUB_RUN_ID}_${RUNNER_OS}" echo "WORKSPACE_DNAME=$WORKSPACE_DNAME" mkdir -p $WORKSPACE_DNAME echo "cd-ing into the workspace" cd $WORKSPACE_DNAME pwd ls -altr # Get the path to the installed package and run the tests export MOD_DPATH=$(python -c "import xdoctest, os; print(os.path.dirname(xdoctest.__file__))") export MOD_NAME=xdoctest echo " --- MOD_DPATH = $MOD_DPATH --- running the pytest command inside the workspace --- " python -m pytest --verbose -p pytester -p no:doctest --xdoctest --cov-config ../pyproject.toml --cov-report term --durations=100 --cov="$MOD_NAME" "$MOD_DPATH" ../tests echo "pytest command finished, moving the coverage file to the repo root" ls -al # Move coverage file to a new name mv .coverage "../.coverage.$WORKSPACE_DNAME" echo "changing directory back to th repo root" cd .. ls -al - name: Combine coverage Linux if: runner.os == 'Linux' run: |- echo '############ PWD' pwd cp .wheelhouse/.coverage* . || true ls -al uv pip install coverage[toml] | pip install coverage[toml] echo '############ combine' coverage combine . || true echo '############ XML' coverage xml -o ./coverage.xml || true echo '### The cwd should now have a coverage.xml' ls -altr pwd - uses: codecov/codecov-action@v5.4.3 name: Codecov Upload with: file: ./coverage.xml token: ${{ secrets.CODECOV_TOKEN }} test_deploy: name: Deploy Test runs-on: ubuntu-latest if: github.event_name == 'push' && ! startsWith(github.event.ref, 'refs/tags') && ! startsWith(github.event.ref, 'refs/heads/release') needs: - build_and_test_sdist - build_purepy_wheels steps: - name: Checkout source uses: actions/checkout@v4.2.2 - uses: actions/download-artifact@v4.1.8 name: Download wheels with: pattern: wheels-* merge-multiple: true path: wheelhouse - uses: actions/download-artifact@v4.1.8 name: Download sdist with: name: sdist_wheels path: wheelhouse - name: Show files to upload shell: bash run: ls -la wheelhouse - name: Sign and Publish env: TWINE_REPOSITORY_URL: https://test.pypi.org/legacy/ TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.TEST_TWINE_PASSWORD }} CI_SECRET: ${{ secrets.CI_SECRET }} run: |- GPG_EXECUTABLE=gpg $GPG_EXECUTABLE --version openssl version $GPG_EXECUTABLE --list-keys echo "Decrypting Keys" openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:CI_SECRET -d -a -in dev/ci_public_gpg_key.pgp.enc | $GPG_EXECUTABLE --import openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:CI_SECRET -d -a -in dev/gpg_owner_trust.enc | $GPG_EXECUTABLE --import-ownertrust openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:CI_SECRET -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | $GPG_EXECUTABLE --import echo "Finish Decrypt Keys" $GPG_EXECUTABLE --list-keys || true $GPG_EXECUTABLE --list-keys || echo "first invocation of gpg creates directories and returns 1" $GPG_EXECUTABLE --list-keys VERSION=$(python -c "import setup; print(setup.VERSION)") python -m pip install pip uv -U python -m pip install packaging twine -U python -m pip install urllib3 requests[security] GPG_KEYID=$(cat dev/public_gpg_key) echo "GPG_KEYID = '$GPG_KEYID'" GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" WHEEL_PATHS=(wheelhouse/*.whl wheelhouse/*.tar.gz) WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_PATHS[@]}") echo "$WHEEL_PATHS_STR" for WHEEL_PATH in "${WHEEL_PATHS[@]}" do echo "------" echo "WHEEL_PATH = $WHEEL_PATH" $GPG_SIGN_CMD --output $WHEEL_PATH.asc $WHEEL_PATH $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH || echo "hack, the first run of gpg very fails" $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH done ls -la wheelhouse python -m pip install opentimestamps-client ots stamp wheelhouse/*.whl wheelhouse/*.tar.gz wheelhouse/*.asc ls -la wheelhouse twine upload --username __token__ --password "$TWINE_PASSWORD" --repository-url "$TWINE_REPOSITORY_URL" wheelhouse/*.whl wheelhouse/*.tar.gz --skip-existing --verbose || { echo "failed to twine upload" ; exit 1; } - uses: actions/upload-artifact@v4.4.0 name: Upload deploy artifacts with: name: deploy_artifacts path: |- wheelhouse/*.whl wheelhouse/*.zip wheelhouse/*.tar.gz wheelhouse/*.asc wheelhouse/*.ots live_deploy: name: Deploy Live runs-on: ubuntu-latest if: github.event_name == 'push' && (startsWith(github.event.ref, 'refs/tags') || startsWith(github.event.ref, 'refs/heads/release')) needs: - build_and_test_sdist - build_purepy_wheels steps: - name: Checkout source uses: actions/checkout@v4.2.2 - uses: actions/download-artifact@v4.1.8 name: Download wheels with: pattern: wheels-* merge-multiple: true path: wheelhouse - uses: actions/download-artifact@v4.1.8 name: Download sdist with: name: sdist_wheels path: wheelhouse - name: Show files to upload shell: bash run: ls -la wheelhouse - name: Sign and Publish env: TWINE_REPOSITORY_URL: https://upload.pypi.org/legacy/ TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} CI_SECRET: ${{ secrets.CI_SECRET }} run: |- GPG_EXECUTABLE=gpg $GPG_EXECUTABLE --version openssl version $GPG_EXECUTABLE --list-keys echo "Decrypting Keys" openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:CI_SECRET -d -a -in dev/ci_public_gpg_key.pgp.enc | $GPG_EXECUTABLE --import openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:CI_SECRET -d -a -in dev/gpg_owner_trust.enc | $GPG_EXECUTABLE --import-ownertrust openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:CI_SECRET -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | $GPG_EXECUTABLE --import echo "Finish Decrypt Keys" $GPG_EXECUTABLE --list-keys || true $GPG_EXECUTABLE --list-keys || echo "first invocation of gpg creates directories and returns 1" $GPG_EXECUTABLE --list-keys VERSION=$(python -c "import setup; print(setup.VERSION)") python -m pip install pip uv -U python -m pip install packaging twine -U python -m pip install urllib3 requests[security] GPG_KEYID=$(cat dev/public_gpg_key) echo "GPG_KEYID = '$GPG_KEYID'" GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" WHEEL_PATHS=(wheelhouse/*.whl wheelhouse/*.tar.gz) WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_PATHS[@]}") echo "$WHEEL_PATHS_STR" for WHEEL_PATH in "${WHEEL_PATHS[@]}" do echo "------" echo "WHEEL_PATH = $WHEEL_PATH" $GPG_SIGN_CMD --output $WHEEL_PATH.asc $WHEEL_PATH $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH || echo "hack, the first run of gpg very fails" $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH done ls -la wheelhouse python -m pip install opentimestamps-client ots stamp wheelhouse/*.whl wheelhouse/*.tar.gz wheelhouse/*.asc ls -la wheelhouse twine upload --username __token__ --password "$TWINE_PASSWORD" --repository-url "$TWINE_REPOSITORY_URL" wheelhouse/*.whl wheelhouse/*.tar.gz --skip-existing --verbose || { echo "failed to twine upload" ; exit 1; } - uses: actions/upload-artifact@v4.4.0 name: Upload deploy artifacts with: name: deploy_artifacts path: |- wheelhouse/*.whl wheelhouse/*.zip wheelhouse/*.tar.gz wheelhouse/*.asc wheelhouse/*.ots release: name: Create Github Release if: github.event_name == 'push' && (startsWith(github.event.ref, 'refs/tags') || startsWith(github.event.ref, 'refs/heads/release')) runs-on: ubuntu-latest permissions: contents: write needs: - live_deploy steps: - name: Checkout source uses: actions/checkout@v4.2.2 - uses: actions/download-artifact@v4.1.8 name: Download artifacts with: name: deploy_artifacts path: wheelhouse - name: Show files to release shell: bash run: ls -la wheelhouse - run: 'echo "Automatic Release Notes. TODO: improve" > ${{ github.workspace }}-CHANGELOG.txt' - name: Tag Release Commit if: (startsWith(github.event.ref, 'refs/heads/release')) run: |- export VERSION=$(python -c "import setup; print(setup.VERSION)") git tag "v$VERSION" git push origin "v$VERSION" - uses: softprops/action-gh-release@v1 name: Create Release id: create_release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: body_path: ${{ github.workspace }}-CHANGELOG.txt tag_name: ${{ github.ref }} name: Release ${{ github.ref }} body: Automatic Release generate_release_notes: true draft: true prerelease: false files: |- wheelhouse/*.whl wheelhouse/*.asc wheelhouse/*.ots wheelhouse/*.zip wheelhouse/*.tar.gz ### # Unfortunately we cant (yet) use the yaml docstring trick here # https://github.community/t/allow-unused-keys-in-workflow-yaml-files/172120 #__doc__: | # # How to run locally # # https://packaging.python.org/guides/using-testpypi/ # git clone https://github.com/nektos/act.git $HOME/code/act # chmod +x $HOME/code/act/install.sh # (cd $HOME/code/act && ./install.sh -b $HOME/.local/opt/act) # # load_secrets # unset GITHUB_TOKEN # $HOME/.local/opt/act/act \ # --secret=EROTEMIC_TWINE_PASSWORD=$EROTEMIC_TWINE_PASSWORD \ # --secret=EROTEMIC_TWINE_USERNAME=$EROTEMIC_TWINE_USERNAME \ # --secret=EROTEMIC_CI_SECRET=$EROTEMIC_CI_SECRET \ # --secret=EROTEMIC_TEST_TWINE_USERNAME=$EROTEMIC_TEST_TWINE_USERNAME \ # --secret=EROTEMIC_TEST_TWINE_PASSWORD=$EROTEMIC_TEST_TWINE_PASSWORDErotemic-xdoctest-fac8308/.gitignore000066400000000000000000000021731505122333300175220ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # IPython Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # dotenv .env # virtualenv venv/ ENV/ # Spyder project settings .spyderproject # Rope project settings .ropeproject .pytest_cache dev/*.pgp requirements-strict.txt requirements-strict tests/pybind11_test/_skbuild/ Erotemic-xdoctest-fac8308/.mailmap000066400000000000000000000005041505122333300171470ustar00rootroot00000000000000Jon Crall Jon Crall Jon Crall jon.crall Jon Crall joncrall Jon Crall joncrall Jon Crall joncrall Erotemic-xdoctest-fac8308/.readthedocs.yml000066400000000000000000000006471505122333300206240ustar00rootroot00000000000000# .readthedocs.yml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # # See Also: # https://readthedocs.org/dashboard/xdoctest/advanced/ # Required version: 2 build: os: "ubuntu-24.04" tools: python: "3.13" sphinx: configuration: docs/source/conf.py formats: all python: install: - requirements: requirements/docs.txt - method: pip path: . Erotemic-xdoctest-fac8308/CHANGELOG.md000066400000000000000000000435211505122333300173450ustar00rootroot00000000000000# Changelog We are currently working on porting this changelog to the specifications in [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## Version 1.3.0 - Unreleased ### Added * New `ASYNC` basic directive to hold the asyncio event loop in any section of code. Useful for multitasking tests. ### Changed * Removed `16806_WORKAROUND` as it is not longer needed for Python 3.8+ * Removed deprecated --xdoc-force-dynamic and --allow-xdoc-dynamic flags * Improved speed (~6x) of doctest collection when running in pytest * Uses pytest plugin system to disable stdlib doctest instead of monkey patching. ### Fixed * Fixed incorrect return type in docstrings * The doctest parser no longer expand tabs to spaces ## Version 1.2.0 - Released 2024-08-20 ### Added * Support for top level awaits in async code examples. ### Removed * Dropped 3.6 and 3.7 support. Now supporting 3.8+ Use xdoctest<=1.1.6 for 3.6 or 3.7 support. ## Version 1.1.6 - Released 2024-08-01 ### Fixed * Fixed passing of `flags` as keyword argument to `re.sub` for python 3.13 compliance. ## Version 1.1.5 - Released 2024-06-07 ### Changed * Minor modification to `xdoctest --version-info` and exposed it in CLI help. ### Fixed * `modname_to_modpath` fixed in cases where editable installs use type annotations in their MAPPING definition. ## Version 1.1.4 - Released 2024-05-31 ### Fixed * Working around a `modname_to_modpath` issue. ## Version 1.1.3 - Released 2024-01-30 ### Fixed * `modname_to_modpath` now handles cases where editable packages have modules where the name is different than the package. * Update `xdoctest.plugin` to support pytest 8.0 * Fixed deprecated usage of `ast.Num` ## Version 1.1.2 - Released 2023-10-25 ### Added * Partial support for 3.12. New f-string syntax is not supported yet. ### Changed * Removed dependency on six and got rid of old Python 2 logic ## Version 1.1.1 - Released 2023-01-29 ### Changed * Binary tests are now only run on "full" installs to reduce minimal dependencies. * Support for Python 3.11 * Minor typing fixes ## Version 1.1.0 - Released 2022-09-05 ### Fixed * Can now handle basic versions of the new `__editable__` package finder mechanism. * Parsing bug where directives were incorrectly flagged as inline if they were directly followed by a function with a decorator. ### Removed * Dropped 2.7 and 3.5 support. Now supporting 3.6+ Use xdoctest<=1.0.2 for 2.7 or 3.5 support. ### Changed * Improved the "dump" functionality of converting doctests to unit tests. ## Version 1.0.2 - Released 2022-08-19 ### Added * Environs as options: `XDOCTEST_VERBOSE`, `XDOCTEST_OPTIONS`, `XDOCTEST_GLOBAL_EXEC`, `XDOCTEST_REPORT`, `XDOCTEST_STYLE`, and `XDOCTEST_ANALYSIS` environment variables can now be used to specify configuration defaults. ### Changed * Added experimental hidden feature `--insert-skip-directive-above-failures` that can be used to modify your code such that failing doctests are marked as skip. * Disabled traceback suppression on module import errors (this is is configurable via the `supress_import_errors` option). * Xdoctest will no longer try to pre-import the module if none of its doctests have any enabled lines. This also means global-exec statements will NOT run for those tests, which means you can no longer use global-exec to force enabling tests. ## Version 1.0.1 - Released 2022-07-10 ### Added * Add type stubs * Basic support for pyproject.toml under `tool.xdoctest`. Currently only supports options in the native runner. ### Fixed * Corner case bug in error reporting * Doctests that never run any code are now correctly marked as skipped * Issue where the "dumps" command was undocumented and has an error. ### Changed * Moved some globals into a new module called `global_state` and allowed environs to enable debug print statements. * Added `util_deprecation` module to robustly mark features as deprecated. * Modified the google style return type parser to return a type if the only content is some parsable python code. * Modified docscrape google to allow for parsing of `*args` and `**kwargs` in args blocks. This has also moved to the standalone package `googledoc` * Overhaul of repo structure in an effort to modernize and to agree with templates defined by xcookie * Module code now lives in the "src" directory to remove install vs develop ambiguity. ## Version 1.0.0 - Released 2022-03-25 ### Added * Support for Python 3.10 ### Fixed * Warning in pytest8 * Spelling errors in documentation ## Version 0.15.10 - Released 2021-10-08 ### Changed * The xdoctest "analysis" option now defaults to "auto" everywhere. ### Fixed * Fix issue #112 `--analysis=dynamic` argument is now respected ## Version 0.15.9 - Released 2021-09-24 ### Changed * Added GitHub actions to the CI * Disabled workaround 16806 in Python 3.8+ * New CI GPG Keys: Erotemic-CI: 70858F4D01314BF21427676F3D568E6559A34380 for reference the old signing key was 98007794ED130347559354B1109AC852D297D757. ### Fixed * Fixed minor test failures * Fixed #106 - an issue to do with compiling multiline statements in single mode. * Fixed #108 - an issue to do with compiling semicolon token in eval mode. ## Version 0.15.8 - Released 2021-09-02 ### Changed * Removed the distracting and very long internal traceback that occurred in pytest when a module errors while it is being imported before the doctest is run. * Pytest now defaults to `--xdoctest-verbose=2` by default (note this does nothing unless `-s` is also given so pytest does not suppress output) ## Version 0.15.7 - Yanked ### Fixed * Bug in REQUIRES state did not respect `python_implementation` arguments * Ported sphinx fixes from ubelt ## Version 0.15.6 - Released 2021-08-08 ### Changed * Directive syntax errors are now handled as doctest runtime errors and return better debugging information. * README and docs were improved ## Version 0.15.5 - Released 2021-06-27 ### Changed * Better message when a pytest skip or exit-test-exception occurs ### Fixed * Suppressed warning about using internal `FixtureRequest` ## Version 0.15.4 - Released 2021-01-29 ### Fixed * Minor issues with release tarballs. ## Version 0.15.3 - Released 2021-01-28 ### Fixed * Color issues on win32 ### Changed * Moved to CircleCI deploy scripts ## Version 0.15.2 - Released 2021-01-28 ### Fixed * Bug where references to doctest variables were never released ## Version 0.15.1 - Released 2021-01-28 ### Added * Documentation improvements * Minor text fixes ## Version 0.15.0 - Released 2020-09-11 ### Added * `pip install xdoctest` can now specify `[colors]` or `[jupyter]` * Enhanced REQUIRES directive behavior, multiple comma-separated requirements can now be listed in one directive. * Xdoctest can now be run inside of Jupyter notebooks / IPython sessions * Xdoctest can now be run on Jupyter notebooks (Note that in general it is better practice to write a module) ### Fixed * Bug in `doctest_callable` where it would not populate globals from the function context. ### Changed * Renamed `Config` to `DoctestConfig` * Renamed `static_analysis.parse_calldefs` to `static_analysis.parse_static_calldefs`. A temporary function with the old name is exposed for backwards compatibility. * Changed argument name from `modpath_or_name` to `module_identifier` in several functions. This is to better indicate its coercible nature as either a module path, a module name. This change impacts `doctest_module`, `parse_doctestables`, `package_calldefs`. ## [Version 0.14.0] - Released 2020-08-26 ### Added * The REQUIRES directive can now inspect existence or values of environment variables. * Added top-level `doctest_callable` function, which executes the doctests of a function or class. * Support for `NO_COLOR` environment variable. ### Fixed * `IPython.embed` and `ipdb.launch_ipdb_on_exception` now correctly work from inside doctests. * Small incompatibility with `pytest-matrix`. See #82 ## [Version 0.13.0] - Released 2020-07-10 ### Changed * `xdoctest.runner.doctest_module` now accepts the module object itself. * Zero-args doctests no longer capture stdout (this prevents IPython embedding issues). ### Fixed * Fixed minor bug in zero args runner when captured stdout is None * We now ignore doctests in setters and deleters to prevent them from clobbering doctests in getters. ## [Version 0.12.0] - Released 2020-04-16 ### Added * CLI support for doctest "analysis" mode (which can be either static or dynamic). ### Fixed * Google docstrings now allow for leading whitespace in the description. * Support python `3.9.0a5` when `eval` returns a coroutine (tentative). * Use `from_parent` constructors for `pytest` modules when possible. Fixes deprecation warning. ### TODO * better docs * support for numpy and RST example blocks * make `xdoctest -m xdoctest.__init__ __doc__:0` work like `xdoctest -m xdoctest/__init__.py __doc__:0` ## [Version 0.11.0] - Released 2019-12-18 ### Added * Add CI support for PyPy * Add CI support for CPython 3.8 * Added tox * REQUIRES directive now supports CPython, IronPython, Jython, and PyPy * REQUIRES directive now supports PY2, PY3 ## [Version 0.10.3] - Released 2019-11-14 ### Fixed * The verbose flag was previously not taken into account. This is now fixed. ## [Version 0.10.2] - Released 2019-11-12 ### Changed * The `--xdoc-glob` list of patterns now defaults to empty. In general it is not safe to assume a default pattern. This means the user must opt-in to testing text files as if they were doctests. ## [Version 0.10.1] - Released 2019-10-31 ### Changed * `PythonPathContext` now works in more corner cases, although some rarer corner cases will now break. This trade-off should be a net positive. * Releases are handled by TravisCI and will be signed with the GPG key 98007794ED130347559354B1109AC852D297D757 (note we will rotate this key in 1 year). <- (2021-09-06) lol that did not happen, someday I'll get around to setting up rotating GPG keys. ## [Version 0.10.0] - Released 2019-08-15 ### Added * Can now specify zero-args as the command to the xdoctest CLI to run all zero-args functions in a file. * Add known issue: note about possible want-reporting bug. * More docstrings * Add `--version` option to CLI interface ### Changed * You no longer need a comment to denote that a `...` is a continuation and not a ellipsis. (i.e. you don't need to write `... #`) * Want statements will check against return values in nested continuations * Cleaned up internal code, private APIs may break. * Failed doctests will now print their original line prefixes (either `>>> ` or `... ` when possible) ### Fixed * `run_tests.py` now returns the correct error code. (fixes CircleCI) * Fixed outdated docs in the directive file ## [Version 0.9.1] - Released 2019-07-16 ### Changed * Improved backwards compatibility. Explicit continuations now work more similarly to the original doctest. * You no longer need a comment to denote that a `...` is a continuation and not a ellipsis. * Want statements will check against return values in nested continuations ### Fixed * Removed debug print ## [Version 0.9.0] - Released 2019-07-16 ### Added * Add skip count to the native runner ### Changed * Renamed several functions in various classes to be private. Its unlikely anyone was externally using them. The change functions include: * `DoctestExample`: `pre_run` -> `_pre_run` * `DoctestExample`: `post_run` -> `_post_run` * `Directive`: `unpack_args` -> `_unpack_args` * `Directive`: `state_item` -> `effect` * Modified behavior of `RuntimeState.update` to use the directive effect. * Added explicit REQUIRES runtime-state, which maintains a set of unmet conditions. When non-empty it behaves like SKIP. ### Fixed * The REQUIRES directive no longer clobbers the previous SKIP state. ## [Version 0.8.3] - Released 2019-07-15 ### Fixed * The native runner now exits with a non-zero error code on failure ## [Version 0.8.2] - Released 2019-07-14 ### Changed * Slight modifications to file structure * Inherit `util_import` from `ubelt` ### Fixed * Fixed issue with nested functions and exec in older python versions * Fixed issue in modsplit with multidot suffixes. ## [Version 0.8.1] - Released 2019-05-24 ### Fixed * Minor fixes to readme and docs ## [Version 0.8.0] - Released 2019-05-03 ### Added * Added docs! Finally! ### Fixed * Got-want exceptions now return a special error if it fails to create a string-representation of the object instead of crashing. * The `index` argument in `import_module_from_path` is now correctly used. ## [Version 0.7.3] - Released 2019-03-21 ### Added * The REQUIRES directive can now accept python modules in the form: `# xdoctest: +REQUIRES(module:)` * Support for double-colon example syntax in google style parsing: e.g. `Example::`. ### Changed * Demo folder illustrating how xdoctest formats error messages * Reduced import overhead time from 20ms to 1ms. ## [Version 0.7.2] - Released 2019-02-02 ### Changed * Removed warning if `pygments` is not installed ## [Version 0.7.1] - Released 2019-02-01 ### Changed * Changed verbosity defaults ## [Version 0.7.0] - Released 2019-01-18 ### Added * Added `global-exec` to native xdoctest CLI and `xdoctest-global-exec` to the `pytest` plugin CLI ### Changed * Renamed `DocTest.globs` to `DocTest.global_namespace` * Internal test changes ### Fixed * Fixed issue in `traceback` parsing that sometimes caused incorrectly offset line numbers. ## [Version 0.6.2] - Released 20l8-12-11 ### Fixed * Fixed bug in `static_analysis.is_balanced_statement` and `static_analysis.extract_comments` having to do with empty lines * Fixed odd corner case where `import_module_from_path` seemed to modify `sys.path` in a specific environment * Fixed Python2 future issues using the print name in doctests. * Added option to print test times in the runner. ## [Version 0.6.1] - Released 2018-11-15 ### Fixed * Fixed python2 unicode error in collection phase ## [Version 0.6.0] - Released 2018-10-23 ### Added * Added nocolor command line arg * Added parserkw arg * Python 3.7 support ### Changed * Better error messages when you forget a raw string on a google block with newlines in the docstr. * Tests for malformed google docstr case. ## [Version 0.5.8] ### Fixed * Fixed install issues (/introduced hack FIXME later) * Fixed issue with raw string lineno parsing ## [Version 0.5.0] - Released 2018-07-14 ### Added * Added config option for lineno offsets. (corresponding arguments added to native and pytest runners) * Partial support for Python 3.7 ### Changed * Generally Improved doctest error reporting * Includes better coloring for quick visual inspection * Line numbers are now reported in a more intuitive way * We finally removed the exec `DoctestExample.run` from the traceback! * (we report line numbers of errors in a more intuitive way). ### Fixed * Fixed GH#20 where `doclineno_end` was incorrectly parsed * Fixed issue where google style block lineno was incorrect ## [Version 0.4.1] ### Fixed * Fixed bug with reporting elapsed time in native runner ## [Version 0.4.0] - Released 2018-06-10 ### Added * Added auto parsing style. This first tries to use Google, but falls back on freeform if no google-style doctests are found. * Errors are no longer printed twice if only a single test is being run. * Added command "dump" to native runner that reformats enabled doctests so they can be run as unit tests. * Implemented delayed want matching. This enables doctests to use multiple print statements in a row and use a single want statement to check all of their output. ### Changed * All parsers now default to the new "auto" style. * Colorized doctest now highlights "want" lines in a distinct (green) color ## [Version 0.3.5] - Released 2018-06-03 ### Changed * Changed development status to Beta * Output difference now strips the `BLANKLINE` marker if enabled ## [Version 0.3.4] - Released 2018-05-27 ### Changed * The reported difference between got and want now preserves newlines for better visibility. ## [Version 0.3.3] - Released 2018-05-13 ### Fixed * Fixed bug where pytest would collect all tests twice (because the `__init__.py` file was normalized to a directory in `package_modpaths`) ## [Version 0.3.2] - Released 2018-05-08 ### Added * API update to facilitate `mkinit` ## [Version 0.3.1] - Released 2018-04-20 ### Added * Improved doctest syntax error message * `PythonPathContext` no longer breaks if small changes to the path occur in its context. * `PythonPathContext` can now insert into front or back of sys.path * Flags can now be specified before or after positional arguments when using the __main__ script ## [Version 0.3.0] - Released 2018-04-02 ### Added * Added entry point script * example command lines now use the full path if the module is not in the `PYTHONPATH` * Can now override `sys.path` when calling `modname_to_modpath` and `is_modname_importable` (API change) ## [Version 0.2.4] - Released 2018-03-27 ### Added * added `IGNORE_WANT` directive * added separator between printout of docsrc and its stdout ## [Version 0.2.3] ### Changed * Print correct doctest line number in the traceback * Runner `on_error` will now default to return instead of raise ## [Version 0.2.2] ### Fixed * Fixed option parsing bug in __main__ script ## [Version 0.2.1] ### Added * The default runtime state can be customized with the `xdoc-options` command line argument. ### Fixed * Fix crash when trying to read binary files * Fix issue in `_docstr_line_workaround` ## [Version 0.2.0] - Released 2018-02-20 ### Added * Starting keeping a changelog, all changes before this point are only documented via the git history. ## [Version 0.1.0] - Released 2018-02-04 ### Added * Undocumented changes ## [Version 0.0.12] - Released 2017-12-29 ### Added * Undocumented changes ## [Version 0.0.1] - Released 2017-09-24 ### Added * First release Erotemic-xdoctest-fac8308/LICENSE000066400000000000000000000261151505122333300165410ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2022 "Jon Crall" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Erotemic-xdoctest-fac8308/MANIFEST.in000066400000000000000000000005471505122333300172730ustar00rootroot00000000000000include *.md include *.py include *.toml include run_doctests.sh include *.txt include LICENSE include pytest.ini recursive-include docs *.bat recursive-include docs *.py recursive-include docs *.rst recursive-include docs *.txt recursive-include docs Makefile recursive-include requirements *.txt recursive-include tests *.py recursive-include tests *.ipynb Erotemic-xdoctest-fac8308/README.rst000066400000000000000000000477721505122333300172370ustar00rootroot00000000000000|GithubActions| |CircleCI| |Appveyor| |Codecov| |Pypi| |PypiDownloads| |ReadTheDocs| .. The large version wont work because github strips rst image rescaling. .. image:: https://i.imgur.com/u0tYYxM.png :height: 100px :align: left Xdoctest - Execute doctests. A Python package for executing tests in documentation strings! What is a `doctest `__? It is example code you write in a docstring! What is a `docstring `__? Its a string you use as a comment! They get attached to Python functions and classes as metadata. They are often used to auto-generate documentation. Why is it cool? Because you can write tests while you code! Tests are good. Documentation is good. Examples are good. Doctests have low boilerplate, you write them in the same file you write your code. It often can help you write the function. Write down how to construct minimal demo inputs (it helps to have tools to create these) in your file. Copy that code into IPython/Jupyter, and play with your implementation. Copy your finished code into the body. Write down how to call the function with the demo inputs. If you feel inclined, check that the result matches an expected result (while asserts and checks are nice, a test that just shows how to run the code is better than no test at all). .. code:: python def an_algorithm(data, config): """ Example: >>> data = '([()[]])[{}([[]])]' >>> config = {'outer': sum, 'inner': ord} >>> an_algorithm(data, config) 1411 """ # I wrote this function by first finding some interesting demodata # then I wrote the body in IPython and copied it back in. # Now I can reuse this test code I wrote in development as a test! # Covered Code is much easier to debug. # We have a Minimal Working Example (MWE)! result = config['outer'](map(config['inner'], data)) return result The problem? How do you run the code in your doctest? Xdoctest finds and executes your doctests for you. Just run ``xdoctest ``. It plugs into pytest to make it easy to run on a CI. Install and run ``pytest --xdoctest``. The ``xdoctest`` package is a re-write of Python's builtin ``doctest`` module. It replaces the old regex-based parser with a new abstract-syntax-tree based parser (using Python's ``ast`` module). The goal is to make doctests easier to write, simpler to configure, and encourage the pattern of test driven development. +------------------+----------------------------------------------+ | Read the docs | https://xdoctest.readthedocs.io | +------------------+----------------------------------------------+ | Github | https://github.com/Erotemic/xdoctest | +------------------+----------------------------------------------+ | Pypi | https://pypi.org/project/xdoctest | +------------------+----------------------------------------------+ | PyCon 2020 | `Youtube Video`_ and `Google Slides`_ | +------------------+----------------------------------------------+ .. _Youtube Video: https://www.youtube.com/watch?v=CUjCqOw_oFk .. _Google Slides: https://docs.google.com/presentation/d/1563XL-n7534QmktrkLSjVqX36z5uhjUFrPw8wIO6z1c Quick Start ----------- Installation: from pypi ^^^^^^^^^^^^^^^^^^^^^^^ Xdoctest is distributed on pypi as a universal wheel and can be pip installed on Python 3.8+ (Python 2.7 and 3.4 / 3.5 support was removed in Version 1.1.0, 3.6 / 3.7 support was removed in Version 1.2.0). Installations are tested on CPython and PyPy implementations. :: pip install xdoctest Github releases are signed with a GPG public key: ``59A34380`` (note: this incorrectly was listed as ``D297D757`` for before 2024-11-15, which was an older CI signing key). If you care enough to check the gpg signature, you should also verify this agrees with the contents of ``dev/public_gpg_key``. Usage: run your doctests ^^^^^^^^^^^^^^^^^^^^^^^^ After installing, the fastest way to run all doctests in your project is: :: python -m xdoctest /path/to/your/pkg-or-module.py or if your module has been pip-installed / is in the PYTHONPATH run :: python -m xdoctest yourmodname Getting Started --------------- There are two ways to use ``xdoctest``: via ``pytest`` or via the native interface. The native interface is less opaque and implicit, but its purpose is to run doctests. The other option is to use the widely used ``pytest`` package. This allows you to run both unit tests and doctests with the same command and has many other advantages. It is recommended to use ``pytest`` for automatic testing (e.g. in your CI scripts), but for debugging it may be easier to use the native interface. Check if xdoctest will work on your package ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can quickly check if ``xdoctest`` will work on your package out-of-the box by installing it via pip and running ``python -m xdoctest all``, where ```` is the path to your python package / module (or its name if it is installed in your ``PYTHONPATH``). For example with you might test if ``xdoctest`` works on ``networkx`` or ``sklearn`` as such: ``python -m xdoctest networkx all`` / ``python -m xdoctest sklearn all``. Using the pytest interface ^^^^^^^^^^^^^^^^^^^^^^^^^^ When ``pytest`` is run, ``xdoctest`` is automatically discovered, but is disabled by default. This is because ``xdoctest`` needs to replace the builtin ``doctest`` plugin. To enable this plugin, run ``pytest`` with ``--xdoctest`` or ``--xdoc``. This can either be specified on the command line or added to your ``addopts`` options in the ``[pytest]`` section of your ``pytest.ini`` or ``tox.ini``. To run a specific doctest, ``xdoctest`` sets up ``pytest`` node names for these doctests using the following pattern: ``:::``. For example a doctest for a function might look like this ``mymod.py::funcname:0``, and a class method might look like this: ``mymod.py::ClassName::method:0`` Using the native interface. ^^^^^^^^^^^^^^^^^^^^^^^^^^^ In addition to the ``pytest`` plugin, xdoctest has a native doctest runner. You can use the ``xdoctest`` command line tool that is installed with the package and point it a module directory or a particular file. You can also make it such that invoking your module as ``__main__`` invokes the xdoctest native runner using the using the ``xdoctest.doctest_module(path)`` method, which can be placed in the ``__main__`` section of any module as such: .. code:: python if __name__ == '__main__': import xdoctest xdoctest.doctest_module(__file__) This sets up the ability to invoke the ``xdoctest`` command line interface. ``python -m ``. However, it is typically preferred to just use the ``xdoctest`` executable and pass it the path to your file, or the name of an installed module. In this case it is invoked like ``xdoctest -m ``. Using either of these methods you can natively invoke xdoctest on a module or package, which exposes the command line interface. Both of these expose the command line interface, allowing you to pass a command to xdoctest. - If ```` is ``all``, then each enabled doctest in the module is executed: ``python -m all`` - If ```` is ``list``, then the names of each enabled doctest is listed. - If ```` is ``dump``, then all doctests are converted into a format suitable for unit testing, and dumped to stdout (new in 0.4.0). - If ```` is a ``callname`` (name of a function or a class and method), then that specific doctest is executed: ``python -m ``. Note: you can execute disabled doctests or functions without any arguments (zero-args) this way. For example if you created a module ``mymod.py`` with the following code: .. code:: python def func1(): """ Example: >>> assert func1() == 1 """ return 1 def func2(a): """ Example: >>> assert func2(1) == 2 >>> assert func2(2) == 3 """ return a + 1 You could * Use the command ``xdoctest -m mymod list`` to list the names of all functions with doctests * Use the command ``xdoctest -m mymod all`` to run all functions with doctests * Use the command ``xdoctest -m mymod func1`` to run only func1's doctest * Use the command ``xdoctest -m mymod func2`` to run only func2's doctest Passing ``--help`` to either way of invoking the native runner will result in something similar to the following that outlines what other options are available: .. code:: usage: xdoctest [-h] [--version] [-m MODNAME] [-c COMMAND] [--style {auto,google,freeform}] [--analysis {auto,static,dynamic}] [--durations DURATIONS] [--time] [--colored COLORED] [--nocolor] [--offset] [--report {none,cdiff,ndiff,udiff,only_first_failure}] [--options OPTIONS] [--global-exec GLOBAL_EXEC] [--verbose VERBOSE] [--quiet] [--silent] [arg ...] Xdoctest 1.0.1 - on Python - 3.9.9 (main, Jun 10 2022, 17:45:11) [GCC 11.2.0] - discover and run doctests within a python package positional arguments: arg Ignored if optional arguments are specified, otherwise: Defaults --modname to arg.pop(0). Defaults --command to arg.pop(0). (default: None) optional arguments: -h, --help show this help message and exit --version Display version info and quit (default: False) -m MODNAME, --modname MODNAME Module name or path. If specified positional modules are ignored (default: None) -c COMMAND, --command COMMAND A doctest name or a command (list|all|). Defaults to all (default: None) --style {auto,google,freeform} Choose the style of doctests that will be parsed (default: auto) --analysis {auto,static,dynamic} How doctests are collected (default: auto) --durations DURATIONS Specify execution times for slowest N tests.N=0 will show times for all tests (default: None) --time Same as if durations=0 (default: False) --colored COLORED Enable or disable ANSI coloration in stdout (default: True) --nocolor Disable ANSI coloration in stdout --offset If True formatted source linenumbers will agree with their location in the source file. Otherwise they will be relative to the doctest itself. (default: False) --report {none,cdiff,ndiff,udiff,only_first_failure} Choose another output format for diffs on xdoctest failure (default: udiff) --options OPTIONS Default directive flags for doctests (default: None) --global-exec GLOBAL_EXEC Custom Python code to execute before every test (default: None) --verbose VERBOSE Verbosity level. 0 is silent, 1 prints out test names, 2 additionally prints test stdout, 3 additionally prints test source (default: 3) --quiet sets verbosity to 1 --silent sets verbosity to 0 Zero-args runner ^^^^^^^^^^^^^^^^ The native interface has a "zero-args" mode in the ``xdoctest`` runner. This allows you to run functions in your modules via the command line as long as they take no arguments. The purpose is to create a quick entry point to functions in your code (because ``xdoctest`` is taking the space in the ``__main__`` block). For example, you might create a module ``mymod.py`` with the following code: .. code:: python def myfunc(): print('hello world') if __name__ == '__main__': import xdoctest xdoctest.doctest_module(__file__) Even though ``myfunc`` has no doctest it can still be run using the command ``python -m mymod myfunc``. Note, even though "zero-arg" functions can be run via this interface they are not run by ``python -m mymod all``, nor are they listed by ``python -m mymod list``. However, if you are doing this often, you may be better served by `fire `__. Enhancements ------------ The main enhancements ``xdoctest`` offers over ``doctest`` are: 1. All lines in the doctest can now be prefixed with ``>>>``. There is no need for the developer to differentiate between ``PS1`` and ``PS2`` lines. However, old-style doctests where ``PS2`` lines are prefixed with ``...`` are still valid. 2. Additionally, the multi-line strings don't require any prefix (but its ok if they do have either prefix). 3. Tests are executed in blocks, rather than line-by-line, thus comment-based directives (e.g. ``# doctest: +SKIP``) can now applied to an entire block (by placing it one the line above), in addition to having it just apply to a single line (by placing it in-line at the end). 4. Tests without a "want" statement will ignore any stdout / final evaluated value. This makes it easy to use simple assert statements to perform checks in code that might write to stdout. 5. If your test has a "want" statement and ends with both a value and stdout, both are checked, and the test will pass if either matches. 6. Output from multiple sequential print statements can now be checked by a single "got" statement. (new in 0.4.0). 7. Examples can include `async code at the top level `__. See code in ``dev/_compare/demo_enhancements.py`` for a demo that illustrates several of these enhancements. This demo shows cases where ``xdoctest`` works but ``doctest`` fails. As of version 0.9.1, there are no known syntax backwards incompatibility. Please submit an issue if you can find any backwards incompatible cases. Examples -------- Here is an example demonstrating the new relaxed (and backwards-compatible) syntax: .. code:: python def func(): """ # Old way >>> def func(): ... print('The old regex-based parser required specific formatting') >>> func() The old regex-based parser required specific formatting # New way >>> def func(): >>> print('The new ast-based parser lets you prefix all lines with >>>') >>> func() The new ast-based parser lets you prefix all lines with >>> """ .. code:: python def func(): """ # Old way >>> print(''' ... It would be nice if we didnt have to deal with prefixes ... in multiline strings. ... '''.strip()) It would be nice if we didnt have to deal with prefixes in multiline strings. # New way >>> print(''' Multiline can now be written without prefixes. Editing them is much more natural. '''.strip()) Multiline can now be written without prefixes. Editing them is much more natural. # This is ok too >>> print(''' >>> Just prefix everything with >>> and the doctest should work >>> '''.strip()) Just prefix everything with >>> and the doctest should work """ Xdoctest Parsing Style ---------------------- There are currently two main doctest parsing styles: ``google`` and ``freeform``, as well as a third style: ``auto``, which is a hybrid. The parsing style can be set via the ``--style`` command line argument in the Xdoctest CLI, or via the ``--xdoctest-style`` if using pytest. Setting ``--style=google`` (or ``--xdoctest-style=google`` in pytest) enables google-style parsing. A `Google-style `__ doctest is expected to exist in Google "docblock" with an ``Example:`` or ``Doctest:`` tag. All code in this block is parsed out as a single doctest. Setting ``--style=freeform`` (or ``--xdoctest-style=freeform`` in pytest) enables freeform-style parsing. A freeform style doctest is any contiguous block of lines prefixed by ``>>>``. This is the original parsing style of the builtin doctest module. Each block is listed as its own test. By default Xdoctest sets ``--style=auto`` (or ``--xdoctest-style=auto`` in pytest) which will pull all google-style blocks out as single doctests, while still all other ``>>>`` prefixed code out as a freeform doctest. Notes On Got/Want Tests ----------------------- The new got/want tester is very permissive by default; it ignores differences in whitespace, tries to normalize for python 2/3 Unicode/bytes differences, ANSI formatting, and it uses the old doctest ELLIPSIS fuzzy matcher by default. If the "got" text matches the "want" text at any point, the test passes. Currently, this permissiveness is not highly configurable as it was in the original doctest module. It is an open question as to whether or not this module should support that level of configuration. If the test requires a high degree of specificity in the got/want checker, it may just be better to use an ``assert`` statement. Backwards Compatibility ----------------------- There are no known syntax incompatibilities with original doctests. This is based on running doctests on real life examples in ``boltons``, ``ubelt``, ``networkx``, ``pytorch``, and on a set of extensive testing suite. Please raise an issue or submit a merge/pull request if you find any incompatibility. Despite full syntax backwards compatibility, there some runtime incompatibilities by design. Specifically, Xdoctest enables a different set of default directives, such that the "got"/"want" checker is more permissive. Thus, a test that fails in ``doctest`` based on a "got"/"want" check, may pass in ``xdoctest``. For this reason it is recommended that you rely on coded ``assert``-statements for system-critical code. This also makes it much easier to transform your ``xdoctest`` into a ``unittest`` when you realize your doctests are getting too long. One Last Example ---------------- XDoctest is a good demonstration of itself. After pip installing xdoctest, try running xdoctest on xdoctest. .. code:: bash xdoctest xdoctest If you would like a slightly less verbose output, try .. code:: bash xdoctest xdoctest --verbose=1 # or xdoctest xdoctest --verbose=0 You could also consider running xdoctests tests through pytest: .. code:: bash pytest $(python -c 'import xdoctest, pathlib; print(pathlib.Path(xdoctest.__file__).parent)') --xdoctest If you would like a slightly more verbose output, try .. code:: bash pytest -s --verbose --xdoctest-verbose=3 --xdoctest $(python -c 'import xdoctest, pathlib; print(pathlib.Path(xdoctest.__file__).parent)') If you ran these commands, the myriad of characters that flew across your screen are lots more examples of what you can do with doctests. .. |CircleCI| image:: https://circleci.com/gh/Erotemic/xdoctest.svg?style=svg :target: https://circleci.com/gh/Erotemic/xdoctest .. |Travis| image:: https://img.shields.io/travis/Erotemic/xdoctest/main.svg?label=Travis%20CI :target: https://travis-ci.org/Erotemic/xdoctest .. |Appveyor| image:: https://ci.appveyor.com/api/projects/status/github/Erotemic/xdoctest?branch=main&svg=True :target: https://ci.appveyor.com/project/Erotemic/xdoctest/branch/main .. |Codecov| image:: https://codecov.io/github/Erotemic/xdoctest/badge.svg?branch=main&service=github :target: https://codecov.io/github/Erotemic/xdoctest?branch=main .. |Pypi| image:: https://img.shields.io/pypi/v/xdoctest.svg :target: https://pypi.python.org/pypi/xdoctest .. |PypiDownloads| image:: https://img.shields.io/pypi/dm/xdoctest.svg :target: https://pypistats.org/packages/xdoctest .. |CondaDownloads| image:: https://anaconda.org/conda-forge/xdoctest/badges/downloads.svg :target: https://anaconda.org/conda-forge/xdoctest .. |ReadTheDocs| image:: https://readthedocs.org/projects/xdoctest/badge/?version=latest :target: https://xdoctest.readthedocs.io .. |GithubActions| image:: https://github.com/Erotemic/xdoctest/actions/workflows/tests.yml/badge.svg?branch=main :target: https://github.com/Erotemic/xdoctest/actions?query=branch%3Amain Erotemic-xdoctest-fac8308/clean.sh000077500000000000000000000013241505122333300171500ustar00rootroot00000000000000#!/bin/bash echo "start clean" rm -rf _skbuild rm -rf coverage.xml rm -rf -- *.so rm -rf build rm -rf xdoctest.egg-info rm -rf src/xdoctest.egg-info rm -rf dist rm -rf docs/build rm -rf mb_work rm -rf wheelhouse rm -rf .pytest_cache rm -rf pip-wheel-metadata rm -rf htmlcov rm -rf .coverage rm -rf __pycache__ rm -rf tests/pybind11_test/tmp rm -rf tests/pybind11_test/_skbuild rm -rf tests/pybind11_test/my_ext.egg-info rm -rf .mypy_cache rm distutils.errors 2&> /dev/null || echo "skip rm" CLEAN_PYTHON='find . -iname *.pyc -delete ; find . -iname *.pyo -delete ; find . -regex ".*\(__pycache__\|\.py[co]\)" -delete' bash -c "$CLEAN_PYTHON" echo "finish clean" __fixperm_notes__=""" chmod o+rw . chmod o+rw -R * """ Erotemic-xdoctest-fac8308/dev/000077500000000000000000000000001505122333300163055ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/dev/_compare/000077500000000000000000000000001505122333300200725ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/dev/_compare/demo_enhancements.py000066400000000000000000000041261505122333300241230ustar00rootroot00000000000000""" This file contains doctests that work in xdoctest but fail in doctest Use the following command lines to run the doctest and xdoctest version to see the difference: CommandLine: python -m xdoctest demo_enhancements.py python -m doctest demo_enhancements.py """ def multiline_madness(): """ >>> if True: >>> print('doctest requires a special ... prefix') doctest requires a special ... prefix """ pass def embeded_triple_quotes(): """ >>> x = ''' xdoctest is good at dealing with triple quoted strings you don't even need to have the >>> prefix, because the AST knows you are in a string context ''' >>> print(x) xdoctest is good at dealing with triple quoted strings you don't even need to have the >>> prefix, because the AST knows you are in a string context """ pass def sequential_print_statements(): """ >>> print('In builtin doctest you have to handle stdout on EVERY line') >>> print('But in xdoctest its no problem') In builtin doctest you have to handle stdout on EVERY line But in xdoctest its no problem """ pass def repl_print_statements(): """ >>> print('but sometimes repl is good') but sometimes repl is good >>> print('thats ok, we support it') thats ok, we support it """ pass def multiple_eval_for_loops_v1(): """ Previously this failed in xdoctest, but now it works as of 0.9.1 >>> for i in range(2): ... '%s' % i ... '0' '1' """ def multiple_eval_for_loops_v2(): """ However, xdoctest can handle this as long as you print to stdout >>> for i in range(2): ... print('%s' % i) ... 0 1 """ def compact_style_code(): """ This compact style is a bit ugly, but it should still be valid python Exception: >>> try: raise Exception # doctest: +ELLIPSIS ... except Exception: raise Traceback (most recent call last): ... Exception ... """ try: raise Exception # NOQA except Exception: pass # NOQA Erotemic-xdoctest-fac8308/dev/_compare/demo_failures.py000066400000000000000000000006351505122333300232660ustar00rootroot00000000000000""" This contains that fail in both. This demos what correct failures look like in each case. CommandLine: python -m xdoctest demo_failures.py python -m doctest demo_failures.py """ def do_asserts_work(): """ >>> assert False, 'this test should fail' """ pass def multiple_eval_for_loops_v1_fail(): """ >>> for i in range(2): ... '%s' % i ... 0 1 """ Erotemic-xdoctest-fac8308/dev/_compare/demo_issue_106.py000066400000000000000000000061611505122333300231720ustar00rootroot00000000000000r""" https://github.com/Erotemic/xdoctest/issues/106 cd ~/code/xdoctest/dev/_compare/ python -m xdoctest demo_issue_106.py python -m doctest demo_issue_106.py Note: the reason this fails is because this fails: compile('try: raise Exception\nexcept Exception: print', mode='single', filename="") In exec mode we are ok compile('try: raise Exception\nexcept Exception: print', mode='exec', filename="") This has to do with the assign ps1 line function that determines if we should be in exec or single mode Other tests compile('if 1:\n a', mode='single', filename="") compile('if 1:\n print', mode='single', filename="") compile('if 1:\n x = 1\n y = 2\nelse:\n pass', mode='single', filename="") compile('try:\n raise Exception\nexcept Exception:\n pass', mode='single', filename="") compile('try: raise Exception\nexcept Exception: pass', mode='single', filename="") except Exception: print', mode='single', filename="") """ import sys def logTracebackThisDoesnt(logFunction): r""" Logs the exception traceback to the specified log function. >>> # xdoctest: +IGNORE_WANT >>> try: raise Exception() # doctest: +ELLIPSIS ... except Exception: print(lambda *a, **b: sys.stdout.write(str(a) + "\n" + str(b))) Traceback (most recent call last): ... Exception ... """ sys.exc_info() # import xdev # xdev.embed() logFunction pass def slurpFile(path, mode, maxbytes, **kwds): """ >>> import os; slurpFile(os.path.abspath(__file__), mode = 'rb')[:9] b'# coding=' >>> import os; slurpFile(os.path.abspath(__file__), encoding='utf-8')[:9] '# coding=' """ pass # def logTracebackThisWorks(logFunction): # r""" Logs the exception traceback to the specified log function. # >>> try: raise Exception() # doctest: +ELLIPSIS # >>> except Exception: print(lambda *a, **b: sys.stdout.write(str(a) + "\n" + str(b))) # Traceback (most recent call last): # ... # Exception # ... # """ # sys.exc_info() # # import xdev # # xdev.embed() # logFunction # pass # # ... except Exception: logTraceback(lambda *a, **b: sys.stdout.write(a[0] + "\n", *a[1:], **b)) # # def compact_style_code(): # # """ # # This compact style is a bit ugly, but it should still be valid python # # Exception: # # >>> try: raise Exception # doctest: +ELLIPSIS # # ... except Exception: raise # # Traceback (most recent call last): # # ... # # Exception # # ... # # """ # # try: raise Exception # NOQA # # except Exception: pass # NOQA # def logTraceback2(logFunction): # r""" Logs the exception traceback to the specified log function. # >>> try: # ... raise Exception() # ... except Exception: # ... logTraceback(lambda *a, **b: sys.stdout.write(a[0] + "\n", *a[1:], **b)) # Traceback (most recent call last): # ... # Exception # ... # """ # import sys # logFunction(*sys.exec_info) # logFunction() # pass Erotemic-xdoctest-fac8308/dev/_old_pkgutil_code.py000066400000000000000000000022011505122333300223200ustar00rootroot00000000000000 def _pkgutil_submodule_names(modpath, with_pkg=False, with_mod=True): """ Ignore: x = sorted(submodule_paths(modname_to_modpath('ubelt'))) y = sorted(_pkgutil_submodule_names(modname_to_modpath('ubelt'))) x = [modpath_to_modname(p, hide_init=False, hide_main=False) for p in x] print('x = {!r}'.format(x)) print('y = {!r}'.format(y)) Notes: this will take into account pyc files, we choose not to. """ package_name = modpath_to_modname(modpath) if isfile(modpath): # If input is a file, just return it yield package_name else: # Otherwise, if it is a package, find sub-packages and sub-modules import pkgutil # dont use the pkgutil version, as it is incompatible with pytest prefix = package_name + '.' walker = pkgutil.walk_packages([modpath], prefix=prefix, onerror=lambda x: None) # nocover for importer, modname, ispkg in walker: if not ispkg and with_mod: yield modname elif ispkg and with_pkg: yield modname Erotemic-xdoctest-fac8308/dev/backwards_incompatiblity_examples_inthewild.py000066400000000000000000000051541505122333300277010ustar00rootroot00000000000000# From boltons # python -m xdoctest boltons/urlutils.py OrderedMultiDict.sorted:0 # def distinguish_ellipses_from_extension(): # """ # This is a parsing error because how do we know if ... is an ellipses or # a parsing error? # Note, if we put a # after the empty line, the code still breaks, so that is # something we can/should fix. # >>> class NamesFilter: # ... def __init__(self, allowed): # ... self._allowed = allowed # ... # ... def filter(self, names): # ... return [name for name in names if name in self._allowed] # """ def known_indent_value_case(): """ xdoctest -m ~/code/xdoctest/dev/backwards_incompatiblity_examples_inthewild.py known_indent_value_case >>> b = 3 >>> if True: ... a = 1 ... isinstance(1, int) True """ def foo(): """ the b'' prefix is messing this up xdoctest ~/code/boltons/boltons/urlutils.py OrderedMultiDict xdoctest ~/code/xdoctest/dev/backwards_incompatiblity_examples_inthewild.py foo >>> from pprint import pprint as pp # ensuring proper key ordering >>> omd = {'a': 3, 'b': 2} >>> pp(dict(omd)) {'a': 3, 'b': 222} """ def eval_in_loop_case(): """ xdoctest -m ~/code/xdoctest/dev/backwards_incompatiblity_examples_inthewild.py eval_in_loop_case >>> for i in range(2): ... '%s' % i ... '0' '1' """ def breaking(): """ CommandLine: xdoctest -m ~/code/xdoctest/dev/backwards_incompatiblity_examples_inthewild.py breaking Example: >>> from xdoctest.utils import codeblock >>> # Simulate an indented part of code >>> if True: >>> # notice the indentation on this will be normal >>> codeblock_version = codeblock( ... ''' ... def foo(): ... return 'bar' ... ''' ... ) >>> # notice the indentation and newlines on this will be odd >>> normal_version = (''' ... def foo(): ... return 'bar' ... ''') >>> assert normal_version != codeblock_version >>> print('Without codeblock') >>> print(normal_version) >>> print('With codeblock') >>> print(codeblock_version) """ def linestep(): r""" CommandLine: xdoctest -m ~/code/xdoctest/dev/backwards_incompatiblity_examples_inthewild.py linestep python -m doctest ~/code/xdoctest/dev/backwards_incompatiblity_examples_inthewild.py linestep >>> print(r'foo\r\n') 'foo' """ Erotemic-xdoctest-fac8308/dev/check_pytest_version_compat.py000066400000000000000000000036671505122333300244700ustar00rootroot00000000000000 import os import ubelt as ub from distutils.version import LooseVersion os.chdir(ub.expandpath("$HOME/code/pytest")) info = ub.cmd('git tag') tags = [t for t in info['out'].split('\n') if t] tags = sorted(tags, key=LooseVersion) has_ispytest = {} language_classifiers = {} for tag in ub.ProgIter(tags): ub.cmd('git checkout {}'.format(tag)) info = ub.cmd('grep -I -ER _ispytest') has_ispytest[tag] = len(info['out'].strip()) > 0 info = ub.cmd('grep -I -ER "Programming Language :: Python" setup.cfg') language_classifiers[tag] = info['out'] for tag, flag in has_ispytest.items(): if flag: break print('First tag with _pytest = {!r}'.format(tag)) pythonversion_to_supported = ub.ddict(list) for tag, clfs in language_classifiers.items(): if clfs != '': for line in clfs.split('\n'): pythonversion_to_supported[line.strip()].append(tag) keys = [ 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ] for key in keys: cands = pythonversion_to_supported[key] if cands: max_version = max(cands, key=LooseVersion) print('key = {} max_version = {!r}'.format(key, max_version)) # key = Programming Language :: Python :: 2.7 max_version = '4.6.11' # key = Programming Language :: Python :: 3.4 max_version = '4.6.11' # key = Programming Language :: Python :: 3.5 max_version = '6.2.0.dev0' # key = Programming Language :: Python :: 3.6 max_version = '6.3.0.dev0' # key = Programming Language :: Python :: 3.7 max_version = '6.3.0.dev0' # key = Programming Language :: Python :: 3.8 max_version = '6.3.0.dev0' # key = Programming Language :: Python :: 3.9 max_version = '6.3.0.dev0' Erotemic-xdoctest-fac8308/dev/ci_public_gpg_key.pgp.enc000066400000000000000000000042731505122333300232250ustar00rootroot00000000000000U2FsdGVkX19CAiiFDdhBuWgigVVamCqGA3XYHWV1nbQKC1P3RgOBMGlPdX52jMmN Zoa5BOeLaHJPGzS7Kv3eSMp6gBbCh+d2P/5FIGZjE64WRBNWlBfBe5yZz0FJCLIH Dsu5kH98pV/efgKLo8AtJ7UkSqeWGBuXnUtyWCmz5likJ5Ulw7j/7zperygSYX2f xQ1PC01z8jLGIXhTW4aQvLDnu3NEDko55u6i0gBh90TeYE2SK75nYMHY/k/mSwHf ap7t+PHvVkviAgqZ9EP43hN7mjdBve79xEQmoopN+rC0yrNkOMXw2cToYAq0AFKu e9INlFuEgfWgXORtVdoEYYTburnm/Y3xmBhYLxBzPNEfJBrjcxInChwbiEdhTYOo yCYo0Rx0JwePbMe6p7qjrX4wEP64GU7mDqT2CIA9ScvTSjABo97O0iRYEEFZelEg PN8gu7fldLOtFyRd6TmNLZFAJlWvGjjO7StXTLouuxzTQqWwpr8czkbJQGHxZDy2 ObIc+Hco2oGwrINeyXIwRi4ylwsHbQ2Aeuk+l/YfXU8faqOdE7kZ9BY9DDzFYVkX dUn6aGe3Y1wnidZDbxYrFFcpX+RpCgwTPx8wHWLW8nEjDUvL/luILOQYteoWoX6R UQ6ljle1v/Hgj171OXdkZtCHnKU3w9zqzwoyt2J/TEQklDCC+JqFP3+AZDS8vCdf m3HX5whfwqkGQIgZmUSw1xmcgxuIvNndArAU1OlyPyjFxwxWwwWk43LTt9niS7TM bQ4MsSOKy32aPhsYcD3gb3y9F9a/YAePeNVu4GIa5hneJBGK7qmiulKvbPor81vM wW5DGgsjzMsUcvLa0vfq47JbVVisRK3GG9c378B65/vg6QlpOhHMeKQdyiGLiEPG JmQYZL7c9VSRz1RJBm0Shi6plGqgvsKnlircUAGenvgnB/ux5tnCxrKb6+Ml/Fqj 7EVE5B/FAqk6WM3Mcya/LY0jzlQVExuLlpIOsZU6kYRRA8S+8Y2heOIBOynMrC51 Rs4EaX1mkemVd4A3W6vscsKPKwO5Y9ZkR2zrSoZsF9rBTxJfRtGoUWFHa//Jfkg6 sAeHCzcF9Tp4t0uvWOpK5s9cFA8yfhQ5edJWAPUYbkci+QcbCNuXHvz/h6L5BdD9 VYFQ0pzqtRwx4rgKMkXwuIb8gyr4twsnsj/85O5ChJoRWRIxtRhAgFrHbzJ6f8je 3aiOdhaLebnZzLhZLgEiIrKYTL96Jfe94NOeBOL+ETgFHd+29gZ4oQkpypnQ6gAS JbYzlEtIBds3xnmErNHLXF9200+pVGMnhNJKIWeCAZGI6do5ikhZ/l9a9psfiumj YaRK5juASN+ud/AOsBwE49jt6CLdzXcKQDUEEHDH0+huPcecCJdO/HSJWxb7x2mM dzhkFIS99zFd29NQ4PlP2pE6FuC7xCpsDJsMJUg5584oARa+QeEqzo1dM3/MWhbJ kypca+RBW2LN6oAxhwQM5jfc4uGuLA38jDYvOxtvZZaqgVuy3VGUn/a3M58AsTUe iRDJXAyZDGmk6voTC79ds3PcYRx3Y8Qo6s4rrC8SDfBmhtE0bHS9nUcl8sTtx74K MWC5I+EBH51tCa8BRvCtDf0kofy9fybZRftfkFe436bjukghGMJMUu3w0fj3ppjB nZmMuiKQEzbAjzYMd1sukRvOnJC2rFOrNV+DB3XYaGxWXiZmZ0pew35IlGjzaVEW wrMIhZaojOpndug/lj35pzyokeZEDFYU8546w8MKNwZozReMmiH5+Mj2E8nrTdoh TVGVjzAiVu9Ovv+yXZ/zpnqZBTNfY0l+PHkiH5/nyMEr2hFeptPe3u1Y8xPb1K33 uquD0l/cMuXz9GbHC/tD6KBGPwt6xnt9xKYMgRMWxeiC85qwidYHNmbV6cnrjm9q 8dnm1pCiQ85V6r452ImzoRox1t0XwMNwWktkSdEyj6J5MuQYcHA2BuYYL3l3VEBP aWMBTH+e5JTc9OcmvU8dMBvkHcjXPSLEPMczBxAFp3CLXHGxWI9pD0T4nS7LfIWm qlXvx/Hi5VgJ1mZ1Chz1vvBzU+YOy6TEDiVb/kCdO4yuR/tgIQYiCLkfsCLjMfzU VE2UJKN+I5aT9lQYrl9DfB6TymNiymcttywL0O7s7sObNYeeRLHu5vpKf/7rRDzt 4MfLqyiIkPTR8JDAczD7PQ== Erotemic-xdoctest-fac8308/dev/ci_secret_gpg_subkeys.pgp.enc000066400000000000000000000033331505122333300241250ustar00rootroot00000000000000U2FsdGVkX1/KOEkIHHX5pOeE9VrSAREfkhxKRPLZOcHQ442PbAwcyE9Zo7fh7ceJ 0eeNbPgKfUmUUzBQvhC5CmduNsMX3xhE9/SyR1RM0HpsI5yRBINSGk2fAl9SqGIh n4IabEyXLgltxTWnEfpL8Y0QFij2haV6tEOYuoav2Iux/yJ4kd9LjjFIgIqmFUiu 8vgABzyiURfPZCTrwqgXRqOyAWWVg5lXi/RWMa60bCPsKf/9qaLsqzdVgX1+8s8U KyBVYmJIRR5dykS+bczEdttsUxP9+BggGkNtbA/dLJ9S8xVDq4CJBei28XG9PRVL 8pK8c6xoN8H/By7D4Q1Cz+/S7sCuaejNtw4AkqTWWwjtiV017MZUxOw4siDOblFZ /xmiPXJETCXXwAaJksTxH+W6Mp20WFuYPvYpistjqbc9BttLiBzY8MzcsV3IKv95 F+gt4EXE31hxTKGKtX4PIbxmlfW9hOL3f4HhV6qAl9sF2vWbA69b92SXtZdCiYsJ 7lEcvyS2dxa3MqNiDFDrAVaIc2AdMbwrf3rGM+mq0p821Ak8Apjn6d+JxSKvv5cP t0fnL1B5FFvJxjLdjwaU5MS71+vKiMAChnIE9keZt6LIhYGYqKDDO3Of6+RBD7/2 8pcT9e9ao3gKbwpNO+hooEHH0uO6lo9xIitao/lkNCZ5MRvHQGOuXqUGkvTXHFKl 5Hag0B3TDQFN8/ECaPM975g32FDNQ3QUkihIfxTF8p31js53tk207Rc/Jr00Zf1Y UrXBHDfi7JVH+ieOOe0qiE3FyBiR4s53Mg/nc+qrJ+sQVVT/W07VCBlJfM/wYACt Jo/OgPVMEFEejXqnhBds4GeKT8uVnw9Zx1RFCsdUOyyQYhAcIMdt/5GHW6sEr8Vv OdubY/d/uWH8vXIaHSy7vfOPmaUuFXmJfkTiUR2ez4jzfLYlofwovq/nkVVmjqcp dCZgeaFO46MMgN5QS7J1OF1agkBbRR/nthNUTLqw6zl95ki1oHL+RjEIaTiyW2L9 lAB+riD2X4P9/KdXv0Nh+QhQir72DUUUIZ8lC6t+CpTCaaILEYpQuwHOLqvUwIZJ Zr8mlv0caa61zi0qw1JXcTjzU32TS4qw1x1lCons/3wn8zvJP6CJ1C7/b5obT8m6 LBO90fvKP8UWzen2GnqR3BP9ayCU7jq/aM2+QIdSzYPis7b9lbAc9o3c9R0DZOoK jmciLzBZVeH7pFAw1mlDULIrMX8iI2jr5+xI3R7KIhOa6Ow0cHjL4VMnMlYw98ps kdiNyuthK8iolngeOBBNfBYf330//Xm49y+xeEwzkBfTXJdEOk8FOKfJvcNoZxZ8 tZvFarejHYgLwmWpQe3vw2Ew/4EXtKQ6RNh96NBAJ9g02uNHt2/lHP+eQXG6B2fI hPgcxYuvYihPYwcZg0/so0nanUy8I5FRhOh6XVH+Iyw7vvnXRuY2Smpoi75fOcYC df2lWopgAwzNY+6/OI6yqUJ7v2PZjPe8iaEJ59S84Tujo0vNlAXLymxZ14fRxflf ZLNP2ua/tjuUd1pA3z0VVlTKpND7obVRQHOqLGzaRe6/e0v9uI8Bek7g0MkKv6sb LeJcG0YDlvj2/2EpSyjDn5Uk4oQZAED/N2nchcosWyoPXjsQcfRaPk8Md89JbZ1Z oT4CU71rk4b4fnWKBl36E8ET3Kxzh+qnEmUWu1iEvyZKFqasJ58YMb7wRvEt0Y6X Erotemic-xdoctest-fac8308/dev/countmin.py000066400000000000000000000010651505122333300205150ustar00rootroot00000000000000 def main(): import re pat = re.compile(r'.*- (?P\d) minute.*') n = 0 matches = [] for line in open('outline.md', 'r').read().splitlines(): match = pat.match(line) if match: # print('line = {!r}'.format(line)) print('match = {!r}'.format(match)) n += int(match.groupdict()['num']) matches += [match] print('n = {!r}'.format(n)) print(len(matches)) if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/dev/countmin.py """ main() Erotemic-xdoctest-fac8308/dev/demo/000077500000000000000000000000001505122333300172315ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/dev/demo/demo_dynamic_analysis.py000066400000000000000000000006031505122333300241350ustar00rootroot00000000000000""" CommandLine: xdoctest ~/code/xdoctest/dev/demo/demo_dynamic_analysis.py --analysis=auto xdoctest ~/code/xdoctest/dev/demo/demo_dynamic_analysis.py --analysis=dynamic xdoctest ~/code/xdoctest/dev/demo/demo_dynamic_analysis.py --xdoc-force-dynamic """ def func() -> None: r''' Dynamic doctest >>> %s %s ''' return func.__doc__ %= ('print(1)', '1') Erotemic-xdoctest-fac8308/dev/demo/demo_errors.py000066400000000000000000000036011505122333300221230ustar00rootroot00000000000000#!/usr/bin/env python """ This file contains doctests with errors. Executing xdoctest on this file will demo how xdoctest reports errors. (It can also be used / was created for debugging) """ def demo1(): """ CommandLine: xdoctest -m ~/code/xdoctest/dev/demo/demo_errors.py demo1 Example: >>> raise Exception('demo1') """ pass def demo2(): """ CommandLine: xdoctest -m ~/code/xdoctest/dev/demo/demo_errors.py demo2 Example: >>> print('error on different line') >>> raise Exception('demo2') """ pass def demo3(): """ CommandLine: xdoctest -m ~/code/xdoctest/dev/demo/demo_errors.py demo3 Example: >>> print('demo5') demo3 """ pass class Demo5: """ CommandLine: xdoctest -m ~/code/xdoctest/dev/demo/demo_errors.py Demo5 Example: >>> raise Exception """ def demo5(self): """ CommandLine: xdoctest -m ~/code/xdoctest/dev/demo/demo_errors.py Demo5.demo5 Example: >>> raise Exception """ pass def demo_parsetime_syntax_error1(): """ Example: >>> from __future__ import print_function >>> print 'Parse-Time Syntax Error' """ def demo_parsetime_syntax_error2(): """ Example: >>> def bad_syntax() return for """ def demo_runtime_error(): """ Example: >>> print('Runtime Error {}'.format(5 / 0)) """ def demo_runtime_name_error(): """ Example: >>> print('Name Error {}'.format(foo)) """ def demo_runtime_warning(): """ Example: >>> import warnings >>> warnings.warn('in-code warning') """ if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/dev/demo/demo_errors.py all """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/dev/demo/demo_issues.py000066400000000000000000000033531505122333300221260ustar00rootroot00000000000000def demo_requires_skips_all_v1(): """ Example: >>> # xdoctest: +REQUIRES(--cliflag) >>> print('hello world') """ def demo_requires_skips_all_v2(): """ Example: >>> # xdoctest: +REQUIRES(module:xdoctest) >>> # xdoctest: +REQUIRES(--cliflag) >>> print('hello world') """ def demo(): """ CommandLine: # Correctly reports skipped (although an only skipped test report # should probably be yellow) xdoctest -m dev/demo/demo_issues.py demo_requires_skips_all_v1 # Incorrectly reports success xdoctest -m dev/demo/demo_issues.py demo_requires_skips_all_v2 # Correctly reports success xdoctest -m dev/demo/demo_issues.py demo_requires_skips_all_v2 --cliflag # Correctly reports success xdoctest -m dev/demo/demo_issues.py demo_requires_skips_all_v1 --cliflag """ # Programmatic reproduction (notice the first one also reports itself in # pytest mode which is also wrong) import xdoctest xdoctest.doctest_callable(demo_requires_skips_all_v1) xdoctest.doctest_callable(demo_requires_skips_all_v2) import sys, ubelt sys.path.append(ubelt.expandpath('~/code/xdoctest/dev/demo')) import demo_issues # Correctly reports skipped xdoctest.doctest_module(demo_issues, command='demo_requires_skips_all_v1', argv=[]) # Incorrectly reports passed xdoctest.doctest_module(demo_issues, command='demo_requires_skips_all_v2', argv=[]) # argv not respected? xdoctest.doctest_module(demo_issues, command='demo_requires_skips_all_v1', argv=['--cliflag']) # argv not respected? xdoctest.doctest_module(demo_issues, command='demo_requires_skips_all_v2', argv=['--cliflag']) Erotemic-xdoctest-fac8308/dev/demo/demo_properties.py000066400000000000000000000103311505122333300230010ustar00rootroot00000000000000# This demonstrates dynamics of properties with clobbering any # namespace variables, so its entirely clear what variables exist and how they # are transformed as the property dectorators are applied import pytest def my_getter_func(self): " getter doc" print('call getter') return 'value' def my_setter_func(self, value): " setter doc" print('call setter for value = {!r}'.format(value)) def my_deleter_func(self): " deleter doc" print('call deleter') # Use the property decorator directly with normal call syntax # Note properties --- like most other decorators --- return new function # objects and do not change the underlying function object. Hence we can still # print the original functions and see how they are assigned to the fset / fget # / fdel attributes of the returned property object. my_getter_prop = property(my_getter_func) my_setter_prop = my_getter_prop.setter(my_setter_func) my_deleter_prop = my_setter_prop.deleter(my_deleter_func) print('my_getter_func = {!r}'.format(my_getter_func)) print('my_setter_func = {!r}'.format(my_setter_func)) print('my_deleter_func = {!r}'.format(my_deleter_func)) print('my_getter_prop = {!r}'.format(my_getter_prop)) print('my_setter_prop = {!r}'.format(my_setter_prop)) print('my_deleter_prop = {!r}'.format(my_deleter_prop)) print('my_getter_prop = {!r}'.format(my_getter_prop)) print('my_getter_prop.fget = {!r}'.format(my_getter_prop.fget)) print('my_getter_prop.fset = {!r}'.format(my_getter_prop.fset)) print('my_getter_prop.fdel = {!r}'.format(my_getter_prop.fdel)) print('my_getter_prop.__doc__ = {!r}'.format(my_getter_prop.__doc__)) print('my_setter_prop = {!r}'.format(my_setter_prop)) print('my_setter_prop.fget = {!r}'.format(my_setter_prop.fget)) print('my_setter_prop.fset = {!r}'.format(my_setter_prop.fset)) print('my_setter_prop.fdel = {!r}'.format(my_setter_prop.fdel)) print('my_setter_prop.__doc__ = {!r}'.format(my_setter_prop.__doc__)) print('my_deleter_prop = {!r}'.format(my_deleter_prop)) print('my_deleter_prop.fget = {!r}'.format(my_deleter_prop.fget)) print('my_deleter_prop.fset = {!r}'.format(my_deleter_prop.fset)) print('my_deleter_prop.fdel = {!r}'.format(my_deleter_prop.fdel)) print('my_deleter_prop.__doc__ = {!r}'.format(my_deleter_prop.__doc__)) # Note: each function has its own doc, but only the doc of the getter is stored # my_getter_func = # my_setter_func = # my_deleter_func = # my_getter_prop = # my_setter_prop = # my_deleter_prop = # my_getter_prop = # my_getter_prop.fget = # my_getter_prop.fset = None # my_getter_prop.fdel = None # my_getter_prop.__doc__ = ' getter doc' # my_setter_prop = # my_setter_prop.fget = # my_setter_prop.fset = # my_setter_prop.fdel = None # my_setter_prop.__doc__ = ' getter doc' # my_deleter_prop = # my_deleter_prop.fget = # my_deleter_prop.fset = # my_deleter_prop.fdel = # my_deleter_prop.__doc__ = ' getter doc' # Create an empty type class Husk: pass # Assigning properties to the class itself is equivalent to how they are # normally defined in the scope of the class definition. Husk.x = my_deleter_prop Husk.y = my_setter_prop Husk.z = my_getter_prop # Creating an instance of the class will let us use our property variables self = Husk() # The "deleter" property has fget, fset, and fdel defined self.x self.x = 3 del self.x # The "setter" property only had fget and fset defined self.y self.y = 3 with pytest.raises(AttributeError): del self.y # The "getter" property only had fget defined self = Husk() self.z with pytest.raises(AttributeError): self.z = 3 del self.z Erotemic-xdoctest-fac8308/dev/demo/demo_usage_with_logger.py000066400000000000000000000026611505122333300243120ustar00rootroot00000000000000""" demo_usage_with_logger.py Script to demo a workaround to [Issue111]_. CommandLine: # Run with xdoctest runner xdoctest ~/code/xdoctest/dev/demo/demo_usage_with_logger.py # Run with pytest runner pytest -s --xdoctest --xdoctest-verbose=3 ~/code/xdoctest/dev/demo/demo_usage_with_logger.py # Run with builtin main python ~/code/xdoctest/dev/demo/demo_usage_with_logger.py References: .. [Issue111] https://github.com/Erotemic/xdoctest/issues/111 """ import logging import sys class StreamHandler2(logging.StreamHandler): def __init__(self, _getstream=None): """ Initialize the handler. If stream is not specified, sys.stderr is used. """ logging.Handler.__init__(self) if _getstream is None: _getstream = lambda: sys.stderr # NOQA self._getstream = _getstream self.__class__.stream = property(lambda self: self._getstream()) def setStream(self, stream): raise NotImplementedError handler = StreamHandler2(lambda: sys.stdout) _log = logging.getLogger('mylog') _log.setLevel(logging.INFO) _log.addHandler(handler) _log.info('hello') _log.info('hello hello') def func_with_doctest(): """ Example: >>> _log.info('greetings from my doctest') greetings from my doctest """ def main(): import xdoctest xdoctest.doctest_callable(func_with_doctest) if __name__ == '__main__': main() Erotemic-xdoctest-fac8308/dev/devcheck/000077500000000000000000000000001505122333300200615ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/dev/devcheck/interactive_embed_tests.py000066400000000000000000000011021505122333300253200ustar00rootroot00000000000000 def interative_test_xdev_embed(): """ CommandLine: xdoctest -m dev/demo/interactive_embed_tests.py interative_test_xdev_embed Example: >>> interative_test_xdev_embed() """ import xdev with xdev.embed_on_exception_context: raise Exception def interative_test_ipdb_embed(): """ CommandLine: xdoctest -m dev/demo/interactive_embed_tests.py interative_test_ipdb_embed Example: >>> interative_test_ipdb_embed() """ import ipdb with ipdb.launch_ipdb_on_exception(): raise Exception Erotemic-xdoctest-fac8308/dev/extension_proposal.py000066400000000000000000000066671505122333300226310ustar00rootroot00000000000000""" I'm thinking about extending #xdoctest. My thought is that lack of docstring syntax highlighting and the mandatory >>> are the greatest barriers to entry. Editors must address the former, but xdoctest could address the later. Currently a google-style doctest looks like this Example: >>> nums = [104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] >>> chrs = list(map(chr, nums)) >>> text = ''.join(chrs) >>> assert len(text) == 11, 'I like asserts better than got/want checks' >>> print(text) hello world But perhaps it could look like this Example: nums = [104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] chrs = list(map(chr, nums)) text = ''.join(chrs) assert len(text) == 11, 'I like asserts better than got/want checks' print(text) hello world I see two disadvantages. First, even the editors that do support doctest highlights wont support this new style off the bat, but it does make the darn things a lot easier to type and work with without special editor extensions (like the ones I use in vimtk). Second, it is hard to distinguish code inputs from expected got/want output, and it also must exist in the context of a google-style example block. The second case might be addressed by a less intrusive header. We could do something IPython-like. Example: In [1]: nums = [104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] chrs = list(map(chr, nums)) text = ''.join(chrs) assert len(text) == 11, 'I like asserts better than got/want checks' print(text) Out [1]: hello world But I don't like the additional indentation (and I think if there is a trailing `:`, you really ought to have indentation) , and the numbering might get annoying. Maybe break it into two google-docstr-style blocks? Example In[]: nums = [104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] chrs = list(map(chr, nums)) text = ''.join(chrs) assert len(text) == 11, 'I like asserts better than got/want checks' print(text) Example Out[]: hello world As an additional extension we could do something to support an RST-style doctest header. .. code:: python nums = [104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] chrs = list(map(chr, nums)) text = ''.join(chrs) assert len(text) == 11, 'I like asserts better than got/want checks' print(text) hello world Again, the problem of distinguishing inputs / outputs persists. 2023-01-27: New idea: backticks Syntax Idea #1: Use markdown with explicit python tags def foobar(): ''' Example: ```python x = 1 y = 2 print(x + y) ``` ```output 3 ``` ''' Syntax Idea #2: Use a simplified markdown tag that is reminicent of markdown def foobar(): ''' Example: `python x = 1 y = 2 print(x + y) `output 3 ''' Syntax Idea #3: Lead output with an xdoctest directive def foobar(): ''' Example: # xdoctest: example x = 1 y = 2 print(x + y) # xdoctest: output 3 ''' Shorter prefixes? def foobar(): ''' Example: > x = 1 > y = 2 > print(x + y) 3 ''' First line matters? def foobar(): ''' Example: .. code:: python x = 1 y = 2 print(x + y) .. code:: output 3 ''' """ Erotemic-xdoctest-fac8308/dev/fix_sphinx_mwe.py000066400000000000000000000032141505122333300217060ustar00rootroot00000000000000 def mwe(): # TODO: show that sphinx fails to parse this, and use xdoctest to make # things better. # This used to be a perfectly valid docstring in xdoctest, but I've since # changed it so it doesn't cause sphinx problems. docstring = r""" Example: >>> from xdoctest import core >>> from xdoctest import utils >>> docstr = utils.codeblock( ''' freeform >>> doctest >>> hasmultilines whoppie >>> 'butthis is the same doctest' >>> secondone Script: >>> 'special case, dont parse me' DisableDoctest: >>> 'special case, dont parse me' want AnythingElse: >>> 'general case, parse me' want ''') >>> examples = list(parse_freeform_docstr_examples(docstr, asone=True)) >>> assert len(examples) == 1 >>> examples = list(parse_freeform_docstr_examples(docstr, asone=False)) >>> assert len(examples) == 3 """ print('docstring = {!r}'.format(docstring)) # pass this to sphinx and show it breaks because whitespace is not added to # pad between the lines, even though they should share lexical scope. # Some people say whitespace is not syntax. They are half-correct. # It should not count as Python syntax if its a blank line with no # trailing non-space characters (unless its wrapped in string quotes). # Spaces are great syntax, but only when they have non-space content after # them. Erotemic-xdoctest-fac8308/dev/gpg_owner_trust.enc000066400000000000000000000011421505122333300222220ustar00rootroot00000000000000U2FsdGVkX19oujbzowBk+dNt+Dy9W/HnC8+AOUeNqFfqZmZ6whz73t1g4XcSoCUQ Lk2Oi5j9rlhtXWMuNid5cBIRLxTxum38OVw8IZMSV7xo91cPiXXcT2iphr4BI+Bf 0fNqXCyK+voeZJunAnrgZ5zOtxU8WGyNJvRrJ4hYVmXXdBfud+AJN2fU4M0m/4I5 Z10JcLx2T0LH+v3dCNvdOTN2w+6+7nAx+gXYGbF6+wYlE5wdTW0h+hRPiIfV3dCR OR4J4DYoshLENCo/k+C727fsMzakzdlCqo5xWbRJdJ/IRkHQcn5ybJdf78NV2QXP SrQjS0c7Aunh+ZI/xrmFxrVDz5fJQPPY07ZH5VgZrDvv4sDN8ZHa2kT40GeYH82J mai+wv5Vw5A6O9VvkYL2sEXfX5zeOqwFXsy1/Kwf38Bq7hl3Bxmfuw2w8kHWU2nz gM0HJ0qVK9t3vagjyr3cVYClPoqtcWu+BfLFPIbOgvCHfMtKJNvtgpyLR9i773fZ BwXxbc3gM36pDKHjxXmem91RyW5qCx+mj91Dg8YP0WNiat3EVlIkIsUgUBd2dxBj CInMaJVkLehBndgMz3MGYw== Erotemic-xdoctest-fac8308/dev/maintain/000077500000000000000000000000001505122333300201055ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/dev/maintain/port_ubelt_utils.py000066400000000000000000000044661505122333300240700ustar00rootroot00000000000000""" Statically ports utilities from ubelt needed by xdoctest. Similar Scripts: ~/code/xdoctest/dev/maintain/port_ubelt_utils.py ~/code/mkinit/dev/maintain/port_ubelt_code.py ~/code/line_profiler/dev/maintain/port_utilities.py """ def _autogen_xdoctest_utils(): import ubelt as ub import liberator lib = liberator.Liberator() from ubelt import util_import lib.add_dynamic(util_import.split_modpath) lib.add_dynamic(util_import.modpath_to_modname) lib.add_dynamic(util_import.modname_to_modpath) lib.add_dynamic(util_import.import_module_from_name) lib.add_dynamic(util_import.import_module_from_path) lib.add_dynamic(util_import._pkgutil_modname_to_modpath) lib.add_dynamic(util_import._importlib_import_modpath) lib.add_dynamic(util_import.is_modname_importable) lib.expand(['ubelt']) text = lib.current_sourcecode() print(text) """ pip install rope pip install parso """ # target_fpath = ub.Path('~/code/xdoctest/src/xdoctest/utils/util_import.py').expand() import parso import xdoctest target_fpath = ub.Path(xdoctest.utils.util_import.__file__) new_module = parso.parse(text) old_module = parso.parse(target_fpath.read_text()) new_names = [child.name.value for child in new_module.children if child.type in {'funcdef', 'classdef'}] old_names = [child.name.value for child in old_module.children if child.type in {'funcdef', 'classdef'}] print(set(old_names) - set(new_names)) print(set(new_names) - set(old_names)) prefix = ub.codeblock( ''' """ This file was autogenerated based on code in ubelt via dev/port_ubelt_utils.py in the xdoctest repo """ ''') # Remove doctest references to ubelt new_lines = [] import re for line in text.split('\n'): if line.strip().startswith('>>> from ubelt'): continue if line.strip().startswith('>>> import ubelt as ub'): line = re.sub('>>> .*', '>>> # xdoctest: +SKIP("ubelt dependency")', line) new_lines.append(line) text = '\n'.join(new_lines) target_fpath.write_text(prefix + '\n' + text + '\n') if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/dev/maintain/port_ubelt_utils.py """ _autogen_xdoctest_utils() Erotemic-xdoctest-fac8308/dev/make_rtd.py000066400000000000000000000031021505122333300204410ustar00rootroot00000000000000""" http://docs.readthedocs.io/en/latest/getting_started.html#in-rst pip install sphinx sphinx-autobuild pip install sphinxcontrib-napoleon pip install sphinx_rtd_theme """ def initialize_docs(): from os.path import join import setup setupkw = setup.setupkw full_version = setup.parse_version() short_version = '.'.join(full_version.split('.')[0:2]) doc_dpath = join('.', 'docs') exe = 'sphinx-apidoc' args = [ exe, '--force', '--full', '--output-dir="{}"'.format(doc_dpath), '--doc-author="{}"'.format(setupkw['author']), '--doc-version="{}"'.format(short_version), '--doc-release="{}"'.format(full_version), '--maxdepth="8"', # '--ext-autodoc', # '--ext-ifconfig', # '--ext-githubpages', # '--ext-mathjax', setupkw['name'], ] cmdstr = ' '.join(args) import ubelt as ub result = ub.cmd(cmdstr, verbose=2) assert result['ret'] == 0 def modify_conf(): """ pip install redbaron """ import redbaron import ubelt as ub conf_path = 'docs/conf.py' source = ub.readfrom(conf_path) red = redbaron.RedBaron(source) # Insert custom extensions extra_extensions = [ '"sphinxcontrib.napoleon"' ] ext_node = red.find('name', value='extensions').parent ext_node.value.value.extend(extra_extensions) # Overwrite theme to read-the-docs theme_node = red.find('name', value='html_theme').parent theme_node.value.value = '"sphinx_rtd_theme"' ub.writeto(conf_path, red.dumps()) Erotemic-xdoctest-fac8308/dev/outline.md000066400000000000000000000243351505122333300203150ustar00rootroot00000000000000Title ===== Developing With Doctests - using Xdoctest as a backend Description =========== This talk is about doctests --- a way to embed examples and unit tests in docstrings. I describe what they are, how to write them, and interesting ways in which they can be used. Doctests make it easy to interactively develop code by defining “demo” inputs which can be copied into IPython. The side effect is a unit test. This is test-driven-development at its best. I explain the technical details of doctest syntax, directives, parsing, and execution. Unfortunately, Python’s builtin doctest module has a restrictive syntax, which makes it difficult to use. In the second part of the talk I introduce an alternative: Xdoctest, a new, but stable package for parsing and running doctests (with optional pytest integration). I explain why doctest’s regex-based parser is fundamentally limited and how xdoctest’s ast-based parser is the remedy. I demonstrate how to use xdoctest and discuss its advantages and disadvantages. By the end of this talk you feel confident in writing, running, and developing with doctests. https://us.pycon.org/2020/speaking/talks/ Who and Why (Audience) ====================== 1. Who is this talk for?: * Python developers wishing to master test-driven-design * People who have been burnt by the doctest module * People who want to learn an interesting way to test Python code 2. What background knowledge or experience do you expect the audience to have?: * The ability to read and write Python code (required) * What a docstring is (nice to have) * Some familiarity with IPython (nice to have) 3. What do you expect the audience to learn after watching the talk?: * Why doctests are awesome * How to write a doctest * How to run a test with the builtin doctest module and where issues arise * How to fix these issues with xdoctest * A little math trivia about how you can’t parse a context-free language with the re module References: https://pythontesting.net/framework/doctest/doctest-introduction/ Developing With Doctests ======================== ## PART 0: INTRODUCTION ### INTRODUCTION - 1 MINUTE * Background * Interests * Code-ography ## PART 1 - INTRODUCTION TO DOCTESTS ### WHAT ARE DOCTESTS - 2 MINUTES * Prefixed code in a docstring * Documentation * Demonstrates your code * Testing * Coverage * Correctness * Continuous Integration / Distribution (CI / CD) *shows an example of a doctest in a function with a zoomed in view of the doctest itself* ### WHAT CAN DOCTESTS DO? - 2 MINUTES * Encourage you to create “demo data” * Let you “play” with the code * Similar to Jupyter notebooks (but they help you publish packages instead of papers) * Show "ENTRYPOINTS - ENTRYPOINTS EVERYWHERE" meme *shows a more complex example where I use doctests to create an entrypoint that lets me play with my code in IPython* ### HOW TO WRITE A DOCTEST - 1 MINUTE * Write a test / example function * Simply prefix it with >>> * Stick it in the docstring *shows a toy example for each bullet point* ### CASE STUDY #1 - 3 MINUTES * Write a doctest for the `paragraph` function *shows as the paragraph function and the test we are doing to "doctest-ify"* *shows what the doctest looks like inside the function* * Runs our doctest using the builtin doctest module and runs into errors * Discuss those errors *shows errors and zooms into a relevant region of the output* * Fix those errors * Rerun doctest and show the errors are fixed *shows the fixed doctest syntax and the result of a successful run* ### CASE STUDY #2 - 3 MINUTES * Write a doctest for the `allsame` function * Run the `allsame` doctest and run into errors * Discuss these errors *shows a doctest inside this function and demonstrates using "got/want" checks* *shows error output and a zoomed in view to the relevant region* * Attempt to fix those errors * Talk about doctest directives * Attempt to fix those errors (this wont go well to demonstrate how frustrating) *shows "fixed" doctest where we use directives to modify "got/want" behavior* ### WHY AREN’T DOCTESTS EVERYWHERE? - 1 MINUTE * They are… somewhat, but they aren’t always working * The builtin doctest module is… lacking * Syntax issues * Difficult error messages * Slightly overcomplicated * Don’t worry I have a solution *shows an image foreshadowing that xdoctest is the solution* ## PART 2 - XDOCTEST ### THE XDOCTEST MODULE - 1 MINUTE * Mostly backwards compatible * Simpler syntax * One golden rule: “>>>” *image is the xdoctest logo* ### QUICK ASIDE ON FORMAL LANGUAGES - 2 MINUTES * Chomsky Hierarchy * Briefly introduce recursively enumerable, context sensitive, context free, and regular languages. * Note on the richness of the subject, but don't go too deep into it *shows an image of the simple 4-level hierarchy with an image (G)Noam Chomsky and of Alan Turing (next to recursively enumerable languages), the left shows the richer version with 14 levels* ### FORMAL PYTHON - 2 MINUTES * Where Python sits on the Chomsky Hierarchy * Explain that doctest uses regular expressions to "parse" its code * Explain why regular expressions cannot parse code and give balanced parenthesis as an example * Discuss why doctests need `ast` not `re` * Stress the point that doctests are Python code *shows a different image of the 4-level hierarchy and an example programming language for each level* ### REVISIT CASE STUDY #1 - 1 MINUTE * Discuss how and why the original syntax will work with xdoctest * Discuss how xdoctest can use ast to determine where statements start and end * Show leniency of xdoctest syntax with respect to multi-line strings *shows an image of the original doctest from case study 1* *shows the successful xdoctest output* ### REVISIT CASE STUDY #2 - 1 MINUTE * Discuss how and why the original syntax will work with xdoctest * Run the original doctest with xdoctest to show that it works *shows an image of the original doctest from case study 2* *shows the successful xdoctest output* ### XDOCTEST IS EASIER TO RUN - 1 MINUTE * Easier to run *shows examples of different command line invocations* ### XDOCTEST HAS BETTER OUTPUT - 1 MINUTE * Command to reproduce errors * Line numbers * Pygments colorized output *shows list of failed tests, highlights line numbers in xdoctest output* ### XDOCTEST HAS BETTER DIRECTIVES - 1 MINUTE * List which original directives are backwards compatible: * DONT_ACCEPT_BLANKLINE * ELLIPSIS * NORMALIZE_WHITESPACE * IGNORE_EXCEPTION_DETAIL * NORMALIZE_WHITESPACE * REPORT_CDIFF * REPORT_NDIFF * REPORT_UDIFF * List which original directives are not implemented in xdoctest: * DONT_ACCEPT_TRUE_FOR_1 * REPORT_ONLY_FIRST_FAILURE * REPORTING_FLAGS * COMPARISON_FLAGS * Note on tweaked defaults * List new directives: * SKIP * IGNORE_WANT * REQUIRES(.) * Demo SKIP directive * Demo REQUIRES directive *this slide is all text* * Continue slide showing that xdoctest lets directives be applied to blocks of code instead of just a single line *shows an example of "block"-directive* *shows xdoctest output when running the example* ### XDOCTEST HAS A PYTEST PLUGIN - 1 MINUTE * Pytest runner * Xdoctest ships with pytest integration * pip install xdoctest * pytest --xdoctest *shows the output of running pytest with the xdoctest plugin enabled* ### WHEN YOUR DOCTESTS WORK / FAIL - 1 MINUTE * It shows how many tests will run * It displays the source code and output of each test that is run * It prints a big friendly exit message indicating that everything worked great and you are a wonderful programmer *shows zoomed in command line to invoke multiple doctests, the intermediate output, and a zoomed view of the final few lines* * Prints how many and what tests failed * Provides shell commands to reproduce errors * It displays the source code and output of each test that is run. This includes the error message * Provides additional information when using got / want tests *shows zoomed in command line to invoke multiple doctests, the intermediate output, and a zoomed view of the final few lines* ### MISCELLANEOUS FEATURES IN XDOCTESTS - 1 MINUTE * Zero args runner * Random utilities * How xdoctest was used to obtain 100% test coverage in ubelt *shows reprise of ENTRYPOINTS meme, ubelt logo, and the 100% code coverage badge* ### LIMITATIONS - 1 MINUTE * It’s a tad slower * It’s not 100% backwards compatible * Some directives no longer exist *this slide is just text* ## PART 3 - WRAPPING UP ### DOCTESTS -VS- XDOCTEST - 1 MINUTE * Doctest: * Built into the standard library stdlib * Uses re (regular expressions) * Restrictive syntax * Terse output * Runs one file at a time * Massive inertia * Xdoctset: * External pip installable module * Uses ast ( abstract syntax trees ) * Relaxed syntax * Better directives * Colored output * Mostly backwards compatible * Runs single functions or entire modules * Works on CPython 2.7+ and PyPy * 34 stars on GitHub * 3.4k downloads / month *this slide is just text* ### CONTRIBUTING - 1 MINUTE * Submit a pull request to GitHub * Areas for contribution * Parsing could be better * External docs could be better * Building on top * Xdoctest exposes an easy way to parse doctests and get line numbers. There are cool refactoring tools that could be built on top of its framework *shows xdoctest GitHub page with CI badges* ### SUMMARY & CONCLUSION - 1 MINUTE * Introduction to Doctests * What it was * Strengths * Weaknesses * Xdoctest * Improvements * Limitations * Cool stuff * Questions (time permitting) *shows image of Noam Chomsky reminding the audience that regular expressions are for tokens and abstract syntax trees are for Python code* Erotemic-xdoctest-fac8308/dev/parse_changelog.py000066400000000000000000000031551505122333300220040ustar00rootroot00000000000000def _parse_changelog(fpath): """ Helper to parse the changelog for the version to verify versions agree. CommandLine: xdoctest -m dev/parse_changelog.py _parse_changelog --dev """ try: from packaging.version import parse as LooseVersion except ImportError: from distutils.version import LooseVersion import re pat = re.compile(r'#.*Version ([0-9]+\.[0-9]+\.[0-9]+)') # We can statically modify this to a constant value when we deploy versions = [] with open(fpath, 'r') as file: for line in file.readlines(): line = line.rstrip() if line: parsed = pat.search(line) if parsed: print('parsed = {!r}'.format(parsed)) try: version_text = parsed.groups()[0] version = LooseVersion(version_text) versions.append(version) except Exception: print('Failed to parse = {!r}'.format(line)) import pprint print('versions = {}'.format(pprint.pformat(versions))) assert sorted(versions)[::-1] == versions import xdoctest changelog_version = versions[0] module_version = LooseVersion(xdoctest.__version__) print('changelog_version = {!r}'.format(changelog_version)) print('module_version = {!r}'.format(module_version)) assert changelog_version == module_version if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/dev/parse_changelog.py """ fpath = 'CHANGELOG.md' _parse_changelog(fpath) Erotemic-xdoctest-fac8308/dev/parse_rst.py000066400000000000000000000025001505122333300206560ustar00rootroot00000000000000""" It might be a good idea to be able to parse RST blocks and doctest them. """ def parse_rst_codeblocks(fpath): """ import ubelt as ub fpath = ub.expandpath('$HOME/code/xdoctest/README.rst') """ # Probably a better way to to this with open(fpath, 'r') as file: text = file.read() blocks = [] valid_code_headers = [ '.. code-block::', '.. code::', ] curr = None for lineno, line in enumerate(text.split('\n')): found_header = None for header in valid_code_headers: if line.startswith(header): found_header = header if found_header is not None: curr = { 'lineno_start': lineno, 'lineno_end': None, 'lines': [], 'header': line, 'language': line.replace(found_header, '').strip() } else: if curr is not None: if line and not line.startswith(' '): curr['lines'] curr['lineno_end'] = lineno - 1 blocks.append(curr) curr = None else: curr['lines'].append(line) # import ubelt as ub # print('blocks = {}'.format(ub.repr2(blocks, nl=3))) return blocks Erotemic-xdoctest-fac8308/dev/public_gpg_key000066400000000000000000000000511505122333300212070ustar00rootroot0000000000000070858F4D01314BF21427676F3D568E6559A34380 Erotemic-xdoctest-fac8308/dev/run_linter.py000066400000000000000000000037551505122333300210520ustar00rootroot00000000000000 def main(): flake8_errors = [ 'E126', # continuation line hanging-indent 'E127', # continuation line over-indented for visual indent 'E201', # whitespace after '(' 'E202', # whitespace before ']' 'E203', # whitespace before ', ' 'E221', # multiple spaces before operator (TODO: I wish I could make an exception for the equals operator. Is there a way to do this?) 'E222', # multiple spaces after operator 'E241', # multiple spaces after , 'E265', # block comment should start with "# " 'E271', # multiple spaces after keyword 'E272', # multiple spaces before keyword 'E301', # expected 1 blank line, found 0 'E305', # expected 1 blank line after class / func 'E306', # expected 1 blank line before func #'E402', # module import not at top 'E501', # line length > 79 'W602', # Old reraise syntax 'E266', # too many leading '#' for block comment 'N801', # function name should be lowercase [N806] 'N802', # function name should be lowercase [N806] 'N803', # argument should be lowercase [N806] 'N805', # first argument of a method should be named 'self' 'N806', # variable in function should be lowercase [N806] 'N811', # constant name imported as non constant 'N813', # camel case 'W504', # line break after binary operator ] flake8_args_list = [ '--max-line-length 79', #'--max-line-length 100', '--ignore=' + ','.join(flake8_errors) ] flake8_args = ' '.join(flake8_args_list) import ubelt as ub import sys loc = ub.expandpath('~/code/xdoctest/xdoctest') command = 'flake8 ' + flake8_args + ' ' + loc print('command = {!r}'.format(command)) info = ub.cmd(command, verbose=3) sys.exit(info['ret']) if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/dev/run_linter.py """ main() Erotemic-xdoctest-fac8308/dev/secrets_configuration.sh000066400000000000000000000005721505122333300232440ustar00rootroot00000000000000export VARNAME_CI_SECRET="EROTEMIC_CI_SECRET" export VARNAME_TWINE_PASSWORD="EROTEMIC_PYPI_MASTER_TOKEN" export VARNAME_TEST_TWINE_PASSWORD="EROTEMIC_TEST_PYPI_MASTER_TOKEN" export VARNAME_TWINE_USERNAME="EROTEMIC_PYPI_MASTER_TOKEN_USERNAME" export VARNAME_TEST_TWINE_USERNAME="EROTEMIC_TEST_PYPI_MASTER_TOKEN_USERNAME" export GPG_IDENTIFIER="=Erotemic-CI " Erotemic-xdoctest-fac8308/dev/setup_secrets.sh000066400000000000000000000471461505122333300215450ustar00rootroot00000000000000#!/usr/bin/env bash __doc__=' ============================ SETUP CI SECRET INSTRUCTIONS ============================ TODO: These instructions are currently pieced together from old disparate instances, and are not yet fully organized. The original template file should be: ~/code/xcookie/dev/setup_secrets.sh Development script for updating secrets when they rotate The intent of this script is to help setup secrets for whichever of the following CI platforms is used: ../.github/workflows/tests.yml ../.gitlab-ci.yml ../.circleci/config.yml ========================= GITHUB ACTION INSTRUCTIONS ========================= * `PERSONAL_GITHUB_PUSH_TOKEN` - This is only needed if you want to automatically git-tag release branches. To make a API token go to: https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/creating-a-personal-access-token ========================= GITLAB ACTION INSTRUCTIONS ========================= ```bash cat .setup_secrets.sh | \ sed "s|utils||g" | \ sed "s|xcookie||g" | \ sed "s|travis-ci-Erotemic||g" | \ sed "s|CI_SECRET||g" | \ sed "s|GITLAB_ORG_PUSH_TOKEN||g" | \ sed "s|gitlab.org.com|gitlab.your-instance.com|g" | \ tee /tmp/repl && colordiff .setup_secrets.sh /tmp/repl ``` * Make sure you add Runners to your project https://gitlab.org.com/utils/xcookie/-/settings/ci_cd in Runners-> Shared Runners and Runners-> Available specific runners * Ensure that you are auto-cancel redundant pipelines. Navigate to https://gitlab.kitware.com/utils/xcookie/-/settings/ci_cd and ensure "Auto-cancel redundant pipelines" is checked. More details are here https://docs.gitlab.com/ee/ci/pipelines/settings.html#auto-cancel-redundant-pipelines * TWINE_USERNAME - this is your pypi username twine info is only needed if you want to automatically publish to pypi * TWINE_PASSWORD - this is your pypi password * CI_SECRET - We will use this as a secret key to encrypt/decrypt gpg secrets This is only needed if you want to automatically sign published wheels with a gpg key. * GITLAB_ORG_PUSH_TOKEN - This is only needed if you want to automatically git-tag release branches. Create a new personal access token in User->Settings->Tokens, You can name the token GITLAB_ORG_PUSH_TOKEN_VALUE Give it api and write repository permissions SeeAlso: https://gitlab.org.com/profile/personal_access_tokens Take this variable and record its value somewhere safe. I put it in my secrets file as such: export GITLAB_ORG_PUSH_TOKEN_VALUE= I also create another variable with the prefix "git-push-token", which is necessary export GITLAB_ORG_PUSH_TOKEN=git-push-token:$GITLAB_ORG_PUSH_TOKEN_VALUE Then add this as a secret variable here: https://gitlab.org.com/groups/utils/-/settings/ci_cd Note the value of GITLAB_ORG_PUSH_TOKEN will look something like: "{token-name}:{token-password}" For instance it may look like this: "git-push-token:62zutpzqga6tvrhklkdjqm" References: https://stackoverflow.com/questions/51465858/how-do-you-push-to-a-gitlab-repo-using-a-gitlab-ci-job # ADD RELEVANT VARIABLES TO GITLAB SECRET VARIABLES # https://gitlab.kitware.com/computer-vision/kwcoco/-/settings/ci_cd # Note that it is important to make sure that these variables are # only decrpyted on protected branches by selecting the protected # and masked option. Also make sure you have master and release # branches protected. # https://gitlab.kitware.com/computer-vision/kwcoco/-/settings/repository#js-protected-branches-settings ============================ Relevant CI Secret Locations ============================ https://github.com/pyutils/line_profiler/settings/secrets/actions https://app.circleci.com/settings/project/github/pyutils/line_profiler/environment-variables?return-to=https%3A%2F%2Fapp.circleci.com%2Fpipelines%2Fgithub%2Fpyutils%2Fline_profiler ' setup_package_environs(){ __doc__=" Setup environment variables specific for this project. The remainder of this script should ideally be general to any repo. These non-secret variables are written to disk and loaded by the script, such that the specific repo only needs to modify that configuration file. " echo "Choose an organization specific setting or make your own. This needs to be generalized more" } ### FIXME: Should be configurable for general use setup_package_environs_gitlab_kitware(){ echo ' export VARNAME_CI_SECRET="CI_KITWARE_SECRET" export VARNAME_TWINE_PASSWORD="EROTEMIC_PYPI_MASTER_TOKEN" export VARNAME_TEST_TWINE_PASSWORD="EROTEMIC_TEST_PYPI_MASTER_TOKEN" export VARNAME_PUSH_TOKEN="GITLAB_KITWARE_TOKEN" export VARNAME_TWINE_USERNAME="EROTEMIC_PYPI_MASTER_TOKEN_USERNAME" export VARNAME_TEST_TWINE_USERNAME="EROTEMIC_TEST_PYPI_MASTER_TOKEN_USERNAME" export GPG_IDENTIFIER="=Erotemic-CI " ' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh git add dev/secrets_configuration.sh } setup_package_environs_github_erotemic(){ echo ' export VARNAME_CI_SECRET="EROTEMIC_CI_SECRET" export VARNAME_TWINE_PASSWORD="EROTEMIC_PYPI_MASTER_TOKEN" export VARNAME_TEST_TWINE_PASSWORD="EROTEMIC_TEST_PYPI_MASTER_TOKEN" export VARNAME_TWINE_USERNAME="EROTEMIC_PYPI_MASTER_TOKEN_USERNAME" export VARNAME_TEST_TWINE_USERNAME="EROTEMIC_TEST_PYPI_MASTER_TOKEN_USERNAME" export GPG_IDENTIFIER="=Erotemic-CI " ' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh git add dev/secrets_configuration.sh } setup_package_environs_github_pyutils(){ echo ' export VARNAME_CI_SECRET="PYUTILS_CI_SECRET" export VARNAME_TWINE_PASSWORD="PYUTILS_PYPI_MASTER_TOKEN" export VARNAME_TEST_TWINE_PASSWORD="PYUTILS_TEST_PYPI_MASTER_TOKEN" export VARNAME_TWINE_USERNAME="PYUTILS_PYPI_MASTER_TOKEN_USERNAME" export VARNAME_TEST_TWINE_USERNAME="PYUTILS_TEST_PYPI_MASTER_TOKEN_USERNAME" export GPG_IDENTIFIER="=PyUtils-CI " ' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh git add dev/secrets_configuration.sh #echo ' #export VARNAME_CI_SECRET="PYUTILS_CI_SECRET" #export GPG_IDENTIFIER="=PyUtils-CI " #' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh } upload_github_secrets(){ load_secrets unset GITHUB_TOKEN #printf "%s" "$GITHUB_TOKEN" | gh auth login --hostname Github.com --with-token if ! gh auth status ; then gh auth login fi source dev/secrets_configuration.sh gh secret set "TWINE_USERNAME" -b"${!VARNAME_TWINE_USERNAME}" gh secret set "TEST_TWINE_USERNAME" -b"${!VARNAME_TEST_TWINE_USERNAME}" toggle_setx_enter gh secret set "CI_SECRET" -b"${!VARNAME_CI_SECRET}" gh secret set "TWINE_PASSWORD" -b"${!VARNAME_TWINE_PASSWORD}" gh secret set "TEST_TWINE_PASSWORD" -b"${!VARNAME_TEST_TWINE_PASSWORD}" toggle_setx_exit } toggle_setx_enter(){ # Can we do something like a try/finally? # https://stackoverflow.com/questions/15656492/writing-try-catch-finally-in-shell echo "Enter sensitive area" if [[ -n "${-//[^x]/}" ]]; then __context_1_toggle_setx=1 else __context_1_toggle_setx=0 fi if [[ "$__context_1_toggle_setx" == "1" ]]; then echo "Setx was on, disable temporarily" set +x fi } toggle_setx_exit(){ echo "Exit sensitive area" # Can we guarantee this will happen? if [[ "$__context_1_toggle_setx" == "1" ]]; then set -x fi } upload_gitlab_group_secrets(){ __doc__=" Use the gitlab API to modify group-level secrets " # In Repo Directory load_secrets REMOTE=origin GROUP_NAME=$(git remote get-url $REMOTE | cut -d ":" -f 2 | cut -d "/" -f 1) HOST=https://$(git remote get-url $REMOTE | cut -d "/" -f 1 | cut -d "@" -f 2 | cut -d ":" -f 1) echo " * GROUP_NAME = $GROUP_NAME * HOST = $HOST " PRIVATE_GITLAB_TOKEN=$(git_token_for "$HOST") if [[ "$PRIVATE_GITLAB_TOKEN" == "ERROR" ]]; then echo "Failed to load authentication key" return 1 fi TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" GROUP_ID=$(< "$TMP_DIR/all_group_info" jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") echo "GROUP_ID = $GROUP_ID" curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" < "$TMP_DIR/group_info" jq # Get group-level secret variables curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables" > "$TMP_DIR/group_vars" < "$TMP_DIR/group_vars" jq '.[] | .key' if [[ "$?" != "0" ]]; then echo "Failed to access group level variables. Probably a permission issue" fi source dev/secrets_configuration.sh SECRET_VARNAME_ARR=(VARNAME_CI_SECRET VARNAME_TWINE_PASSWORD VARNAME_TEST_TWINE_PASSWORD VARNAME_TWINE_USERNAME VARNAME_TEST_TWINE_USERNAME VARNAME_PUSH_TOKEN) for SECRET_VARNAME_PTR in "${SECRET_VARNAME_ARR[@]}"; do SECRET_VARNAME=${!SECRET_VARNAME_PTR} echo "" echo " ---- " LOCAL_VALUE=${!SECRET_VARNAME} REMOTE_VALUE=$(< "$TMP_DIR/group_vars" jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") # Print current local and remote value of a variable echo "SECRET_VARNAME_PTR = $SECRET_VARNAME_PTR" echo "SECRET_VARNAME = $SECRET_VARNAME" echo "(local) $SECRET_VARNAME = $LOCAL_VALUE" echo "(remote) $SECRET_VARNAME = $REMOTE_VALUE" #curl --request GET --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables/SECRET_VARNAME" | jq -r .message if [[ "$REMOTE_VALUE" == "" ]]; then # New variable echo "Remove variable does not exist, posting" toggle_setx_enter curl --request POST --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables" \ --form "key=${SECRET_VARNAME}" \ --form "value=${LOCAL_VALUE}" \ --form "protected=true" \ --form "masked=true" \ --form "environment_scope=*" \ --form "variable_type=env_var" toggle_setx_exit elif [[ "$REMOTE_VALUE" != "$LOCAL_VALUE" ]]; then echo "Remove variable does not agree, putting" # Update variable value toggle_setx_enter curl --request PUT --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables/$SECRET_VARNAME" \ --form "value=${LOCAL_VALUE}" toggle_setx_exit else echo "Remote value agrees with local" fi done rm "$TMP_DIR/group_vars" } upload_gitlab_repo_secrets(){ __doc__=" Use the gitlab API to modify group-level secrets " # In Repo Directory load_secrets REMOTE=origin GROUP_NAME=$(git remote get-url $REMOTE | cut -d ":" -f 2 | cut -d "/" -f 1) PROJECT_NAME=$(git remote get-url $REMOTE | cut -d ":" -f 2 | cut -d "/" -f 2 | cut -d "." -f 1) HOST=https://$(git remote get-url $REMOTE | cut -d "/" -f 1 | cut -d "@" -f 2 | cut -d ":" -f 1) echo " * GROUP_NAME = $GROUP_NAME * PROJECT_NAME = $PROJECT_NAME * HOST = $HOST " PRIVATE_GITLAB_TOKEN=$(git_token_for "$HOST") if [[ "$PRIVATE_GITLAB_TOKEN" == "ERROR" ]]; then echo "Failed to load authentication key" return 1 fi TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) toggle_setx_enter curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" toggle_setx_exit GROUP_ID=$(< "$TMP_DIR/all_group_info" jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") echo "GROUP_ID = $GROUP_ID" toggle_setx_enter curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" toggle_setx_exit GROUP_ID=$(< "$TMP_DIR/all_group_info" jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") < "$TMP_DIR/group_info" jq PROJECT_ID=$(< "$TMP_DIR/group_info" jq ".projects | map(select(.path==\"$PROJECT_NAME\")) | .[0].id") echo "PROJECT_ID = $PROJECT_ID" # Get group-level secret variables toggle_setx_enter curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables" > "$TMP_DIR/project_vars" toggle_setx_exit < "$TMP_DIR/project_vars" jq '.[] | .key' if [[ "$?" != "0" ]]; then echo "Failed to access project level variables. Probably a permission issue" fi LIVE_MODE=1 source dev/secrets_configuration.sh SECRET_VARNAME_ARR=(VARNAME_CI_SECRET VARNAME_TWINE_PASSWORD VARNAME_TEST_TWINE_PASSWORD VARNAME_TWINE_USERNAME VARNAME_TEST_TWINE_USERNAME VARNAME_PUSH_TOKEN) for SECRET_VARNAME_PTR in "${SECRET_VARNAME_ARR[@]}"; do SECRET_VARNAME=${!SECRET_VARNAME_PTR} echo "" echo " ---- " LOCAL_VALUE=${!SECRET_VARNAME} REMOTE_VALUE=$(< "$TMP_DIR/project_vars" jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") # Print current local and remote value of a variable echo "SECRET_VARNAME_PTR = $SECRET_VARNAME_PTR" echo "SECRET_VARNAME = $SECRET_VARNAME" echo "(local) $SECRET_VARNAME = $LOCAL_VALUE" echo "(remote) $SECRET_VARNAME = $REMOTE_VALUE" #curl --request GET --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables/SECRET_VARNAME" | jq -r .message if [[ "$REMOTE_VALUE" == "" ]]; then # New variable echo "Remove variable does not exist, posting" if [[ "$LIVE_MODE" == "1" ]]; then curl --request POST --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables" \ --form "key=${SECRET_VARNAME}" \ --form "value=${LOCAL_VALUE}" \ --form "protected=true" \ --form "masked=true" \ --form "environment_scope=*" \ --form "variable_type=env_var" else echo "dry run, not posting" fi elif [[ "$REMOTE_VALUE" != "$LOCAL_VALUE" ]]; then echo "Remove variable does not agree, putting" # Update variable value if [[ "$LIVE_MODE" == "1" ]]; then curl --request PUT --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables/$SECRET_VARNAME" \ --form "value=${LOCAL_VALUE}" else echo "dry run, not putting" fi else echo "Remote value agrees with local" fi done rm "$TMP_DIR/project_vars" } export_encrypted_code_signing_keys(){ # You will need to rerun this whenever the signkeys expire and are renewed # Load or generate secrets load_secrets source dev/secrets_configuration.sh CI_SECRET="${!VARNAME_CI_SECRET}" echo "VARNAME_CI_SECRET = $VARNAME_CI_SECRET" echo "CI_SECRET=$CI_SECRET" echo "GPG_IDENTIFIER=$GPG_IDENTIFIER" # ADD RELEVANT VARIABLES TO THE CI SECRET VARIABLES # HOW TO ENCRYPT YOUR SECRET GPG KEY # You need to have a known public gpg key for this to make any sense MAIN_GPG_KEYID=$(gpg --list-keys --keyid-format LONG "$GPG_IDENTIFIER" | head -n 2 | tail -n 1 | awk '{print $1}') GPG_SIGN_SUBKEY=$(gpg --list-keys --with-subkey-fingerprints "$GPG_IDENTIFIER" | grep "\[S\]" -A 1 | tail -n 1 | awk '{print $1}') # Careful, if you don't have a subkey, requesting it will export more than you want. # Export the main key instead (its better to have subkeys, but this is a lesser evil) if [[ "$GPG_SIGN_SUBKEY" == "" ]]; then # NOTE: if you get here this probably means your subkeys expired (and # wont even be visible), so we probably should check for that here and # thrown an error instead of using this hack, which likely wont work # anyway. GPG_SIGN_SUBKEY=$(gpg --list-keys --with-subkey-fingerprints "$GPG_IDENTIFIER" | grep "\[C\]" -A 1 | tail -n 1 | awk '{print $1}') fi echo "MAIN_GPG_KEYID = $MAIN_GPG_KEYID" echo "GPG_SIGN_SUBKEY = $GPG_SIGN_SUBKEY" # Only export the signing secret subkey # Export plaintext gpg public keys, private sign key, and trust info mkdir -p dev gpg --armor --export-options export-backup --export-secret-subkeys "${GPG_SIGN_SUBKEY}!" > dev/ci_secret_gpg_subkeys.pgp gpg --armor --export "${GPG_SIGN_SUBKEY}" > dev/ci_public_gpg_key.pgp gpg --export-ownertrust > dev/gpg_owner_trust # Encrypt gpg keys and trust with CI secret GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -e -a -in dev/ci_public_gpg_key.pgp > dev/ci_public_gpg_key.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -e -a -in dev/ci_secret_gpg_subkeys.pgp > dev/ci_secret_gpg_subkeys.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -e -a -in dev/gpg_owner_trust > dev/gpg_owner_trust.enc echo "$MAIN_GPG_KEYID" > dev/public_gpg_key # Test decrpyt GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | gpg --list-packets --verbose GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | gpg --list-packets --verbose GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc cat dev/public_gpg_key unload_secrets # Look at what we did, clean up, and add it to git ls dev/*.enc rm dev/*.pgp rm dev/gpg_owner_trust git status git add dev/*.enc git add dev/gpg_owner_trust git add dev/public_gpg_key } # See the xcookie module gitlab python API #gitlab_set_protected_branches(){ #} _test_gnu(){ # shellcheck disable=SC2155 export GNUPGHOME=$(mktemp -d -t) ls -al "$GNUPGHOME" chmod 700 -R "$GNUPGHOME" source dev/secrets_configuration.sh gpg -k load_secrets CI_SECRET="${!VARNAME_CI_SECRET}" echo "CI_SECRET = $CI_SECRET" cat dev/public_gpg_key GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | gpg --import GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | gpg --import-ownertrust GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | gpg --import gpg -k # | gpg --import # | gpg --list-packets --verbose } Erotemic-xdoctest-fac8308/dev/spell_passlist.txt000066400000000000000000000000341505122333300221040ustar00rootroot00000000000000cant wont dont doesnt arent Erotemic-xdoctest-fac8308/dev/talk.py000066400000000000000000000115621505122333300176170ustar00rootroot00000000000000""" https://tohtml.com/ """ import operator # def paragraph(text): # r""" # Remove leading, trailing, and double whitespace from multi-line strings. # Args: # text (str): typically in the form of a multiline string # Returns: # str: the reduced text block # """ # import re # out = re.sub(r'\s\s*', ' ', text).strip() # return out # def paragraph(text): # r""" # Remove leading, trailing, and double whitespace from multi-line strings. # Args: # text (str): typically in the form of a multiline string # Returns: # str: the reduced text block # Example: # >>> text = ( # >>> ''' # >>> Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do # >>> eiusmod tempor incididunt ut labore et dolore magna aliqua. # >>> ''') # >>> out = paragraph(text) # >>> assert chr(10) in text and chr(10) not in out # """ # import re # out = re.sub(r'\s\s*', ' ', text).strip() # return out def paragraph(text): r""" Remove leading, trailing, and double whitespace from multi-line strings. Args: text (str): typically in the form of a multiline string Returns: str: the reduced text block Example: >>> text = ( ''' Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. ''') >>> out = paragraph(text) >>> assert chr(10) in text and chr(10) not in out """ import re out = re.sub(r'\s\s*', ' ', text).strip() return out # text = ( # ''' # Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do # eiusmod tempor incididunt ut labore et dolore magna aliqua. # ''') # out = paragraph(text) # assert chr(10) in text and chr(10) not in out # def paragraph(text): # r""" # Remove leading, trailing, and double whitespace from multi-line strings. # Args: # text (str): typically in the form of a multiline string # Returns: # str: the reduced text block # Example: # >>> text = ( # ... ''' # ... Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do # ... eiusmod tempor incididunt ut labore et dolore magna aliqua. # ... ''') # >>> out = paragraph(text) # >>> assert chr(10) in text and chr(10) not in out # """ # import re # out = re.sub(r'\s\s*', ' ', text).strip() # return out # def allsame1(iterable, eq=operator.eq): # """ # Determine if all items in a sequence are the same # Args: # iterable (Iterable): items to determine if they are all the same # eq (Callable, optional): function to determine equality # (default: operator.eq) # Example: # >>> allsame([1, 1, 1, 1]) # True # >>> allsame([]) # True # >>> allsame([0, 1]) # False # >>> iterable = iter([0, 1, 1, 1]) # >>> next(iterable) # >>> allsame(iterable) # True # >>> allsame(range(10)) # False # >>> allsame(range(10), lambda a, b: True) # True # """ # iter_ = iter(iterable) # try: # first = next(iter_) # except StopIteration: # return True # return all(eq(first, item) for item in iter_) def allsame(iterable, eq=operator.eq): """ Determine if all items in a sequence are the same Args: iterable (Iterable): items to determine if they are all the same eq (Callable, optional): function to determine equality (default: operator.eq) Example: >>> allsame([1, 1, 1, 1]) True >>> allsame([]) True >>> allsame([0, 1]) False >>> iterable = iter([0, 1, 1, 1]) >>> next(iterable) >>> allsame(iterable) True >>> allsame(range(10)) False >>> allsame(range(10), lambda a, b: True) True """ iter_ = iter(iterable) try: first = next(iter_) except StopIteration: return True return all(eq(first, item) for item in iter_) def demo_directive(): """ Example: >>> print('This is run') >>> print('This is not run') # xdoctest: +SKIP >>> # xdoctest: +SKIP >>> print('Skip has been enabled, commands will not run') >>> print('This is also not run') >>> print('Block based directives can be deactivated') >>> # xdoctest: -SKIP >>> print('This will now run') """ pass if __name__ == '__main__': """ CommandLine: python -m doctest ~/code/xdoctest/dev/talk.py python -m xdoctest ~/code/xdoctest/dev/talk.py demo_directive """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/dev/torch_exposed_issues.py000066400000000000000000000106261505122333300231250ustar00rootroot00000000000000 # We should be able to handle these: def issue1(): # ISSUE: DOES NOT SKIP CORRECTLY def foo1(): """ >>> # xdoctest: +SKIP("Undefined variables") >>> @custom_sharded_op_impl(torch.nn.functional.linear) >>> def my_custom_sharded_linear(types, args, kwargs, process_group): >>> ... >>> input = torch.rand(10, 32) >>> weight = sharded_tensor.rand(32, 16) >>> bias = torch.rand(16) >>> # This will call 'my_custom_sharded_linear' >>> torch.nn.functional.linear(input, weight, bias) """ # DOES SKIP CORRECTLY, WHY? def foo2(): """ >>> @custom_sharded_op_impl(torch.nn.functional.linear) >>> def my_custom_sharded_linear(types, args, kwargs, process_group): >>> ... >>> # xdoctest: +SKIP("Undefined variables") >>> input = torch.rand(10, 32) >>> weight = sharded_tensor.rand(32, 16) >>> bias = torch.rand(16) >>> # This will call 'my_custom_sharded_linear' >>> torch.nn.functional.linear(input, weight, bias) """ def issue2(): # Should be able to parse that setup def CppExtension(name, sources, *args, **kwargs): r''' Creates a :class:`setuptools.Extension` for C++. Convenience method that creates a :class:`setuptools.Extension` with the bare minimum (but often sufficient) arguments to build a C++ extension. All arguments are forwarded to the :class:`setuptools.Extension` constructor. Example: >>> from setuptools import setup >>> from torch.utils.cpp_extension import BuildExtension, CppExtension >>> setup( name='extension', ext_modules=[ CppExtension( name='extension', sources=['extension.cpp'], extra_compile_args=['-g']), ], cmdclass={ 'build_ext': BuildExtension }) ''' include_dirs = kwargs.get('include_dirs', []) include_dirs += include_paths() kwargs['include_dirs'] = include_dirs library_dirs = kwargs.get('library_dirs', []) library_dirs += library_paths() kwargs['library_dirs'] = library_dirs libraries = kwargs.get('libraries', []) libraries.append('c10') libraries.append('torch') libraries.append('torch_cpu') libraries.append('torch_python') kwargs['libraries'] = libraries kwargs['language'] = 'c++' return setuptools.Extension(name, sources, *args, **kwargs) class LSTMCell(torch.nn.Module): r"""A quantizable long short-term memory (LSTM) cell. For the description and the argument types, please, refer to :class:`~torch.nn.LSTMCell` Examples:: >>> import torch.nn.quantizable as nnqa >>> rnn = nnqa.LSTMCell(10, 20) >>> input = torch.randn(3, 10) >>> hx = torch.randn(3, 20) >>> cx = torch.randn(3, 20) >>> output = [] >>> for i in range(6): hx, cx = rnn(input[i], (hx, cx)) output.append(hx) """ _FLOAT_MODULE = torch.nn.LSTMCell def non_doctests(): """ ~/code/pytorch/torch/distributed/launch.py 1. Single-Node multi-process distributed training :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) 2. Multi-Node multi-process distributed training: (e.g. two nodes) Node 1: *(IP: 192.168.1.1, and has a free port: 1234)* :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE --nnodes=2 --node_rank=0 --master_addr="192.168.1.1" --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) Node 2: :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE --nnodes=2 --node_rank=1 --master_addr="192.168.1.1" --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) """ Erotemic-xdoctest-fac8308/docs/000077500000000000000000000000001505122333300164575ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/docs/Makefile000066400000000000000000000011761505122333300201240ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) Erotemic-xdoctest-fac8308/docs/make.bat000066400000000000000000000014441505122333300200670ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd Erotemic-xdoctest-fac8308/docs/requirements.txt000066400000000000000000000001771505122333300217500ustar00rootroot00000000000000sphinx sphinx-autobuild sphinx_rtd_theme sphinxcontrib-napoleon sphinx-autoapi Pygments ubelt sphinx-reredirects myst_parser Erotemic-xdoctest-fac8308/docs/source/000077500000000000000000000000001505122333300177575ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/docs/source/auto/000077500000000000000000000000001505122333300207275ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/docs/source/auto/modules.rst000066400000000000000000000000751505122333300231330ustar00rootroot00000000000000xdoctest ======== .. toctree:: :maxdepth: 4 xdoctest Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.__main__.rst000066400000000000000000000002501505122333300250320ustar00rootroot00000000000000xdoctest.\_\_main\_\_ module ============================ .. automodule:: xdoctest.__main__ :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest._tokenize.rst000066400000000000000000000002451505122333300253050ustar00rootroot00000000000000xdoctest.\_tokenize module ========================== .. automodule:: xdoctest._tokenize :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.checker.rst000066400000000000000000000002351505122333300247210ustar00rootroot00000000000000xdoctest.checker module ======================= .. automodule:: xdoctest.checker :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.constants.rst000066400000000000000000000002431505122333300253300ustar00rootroot00000000000000xdoctest.constants module ========================= .. automodule:: xdoctest.constants :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.core.rst000066400000000000000000000002241505122333300242430ustar00rootroot00000000000000xdoctest.core module ==================== .. automodule:: xdoctest.core :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.demo.rst000066400000000000000000000002241505122333300242370ustar00rootroot00000000000000xdoctest.demo module ==================== .. automodule:: xdoctest.demo :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.directive.rst000066400000000000000000000002431505122333300252720ustar00rootroot00000000000000xdoctest.directive module ========================= .. automodule:: xdoctest.directive :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.docstr.docscrape_google.rst000066400000000000000000000003171505122333300301120ustar00rootroot00000000000000xdoctest.docstr.docscrape\_google module ======================================== .. automodule:: xdoctest.docstr.docscrape_google :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.docstr.docscrape_numpy.rst000066400000000000000000000003141505122333300300030ustar00rootroot00000000000000xdoctest.docstr.docscrape\_numpy module ======================================= .. automodule:: xdoctest.docstr.docscrape_numpy :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.docstr.rst000066400000000000000000000004721505122333300246160ustar00rootroot00000000000000xdoctest.docstr package ======================= Submodules ---------- .. toctree:: :maxdepth: 4 xdoctest.docstr.docscrape_google xdoctest.docstr.docscrape_numpy Module contents --------------- .. automodule:: xdoctest.docstr :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.doctest_example.rst000066400000000000000000000002671505122333300265020ustar00rootroot00000000000000xdoctest.doctest\_example module ================================ .. automodule:: xdoctest.doctest_example :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.doctest_part.rst000066400000000000000000000002561505122333300260130ustar00rootroot00000000000000xdoctest.doctest\_part module ============================= .. automodule:: xdoctest.doctest_part :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.dynamic_analysis.rst000066400000000000000000000002721505122333300266450ustar00rootroot00000000000000xdoctest.dynamic\_analysis module ================================= .. automodule:: xdoctest.dynamic_analysis :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.exceptions.rst000066400000000000000000000002461505122333300255000ustar00rootroot00000000000000xdoctest.exceptions module ========================== .. automodule:: xdoctest.exceptions :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.global_state.rst000066400000000000000000000002561505122333300257600ustar00rootroot00000000000000xdoctest.global\_state module ============================= .. automodule:: xdoctest.global_state :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.parser.rst000066400000000000000000000002321505122333300246060ustar00rootroot00000000000000xdoctest.parser module ====================== .. automodule:: xdoctest.parser :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.plugin.rst000066400000000000000000000002321505122333300246100ustar00rootroot00000000000000xdoctest.plugin module ====================== .. automodule:: xdoctest.plugin :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.rst000066400000000000000000000012371505122333300233210ustar00rootroot00000000000000xdoctest package ================ Subpackages ----------- .. toctree:: :maxdepth: 4 xdoctest.docstr xdoctest.utils Submodules ---------- .. toctree:: :maxdepth: 4 xdoctest.__main__ xdoctest._tokenize xdoctest.checker xdoctest.constants xdoctest.core xdoctest.demo xdoctest.directive xdoctest.doctest_example xdoctest.doctest_part xdoctest.dynamic_analysis xdoctest.exceptions xdoctest.global_state xdoctest.parser xdoctest.plugin xdoctest.runner xdoctest.static_analysis Module contents --------------- .. automodule:: xdoctest :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.runner.rst000066400000000000000000000002321505122333300246230ustar00rootroot00000000000000xdoctest.runner module ====================== .. automodule:: xdoctest.runner :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.static_analysis.rst000066400000000000000000000002671505122333300265140ustar00rootroot00000000000000xdoctest.static\_analysis module ================================ .. automodule:: xdoctest.static_analysis :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.rst000066400000000000000000000007401505122333300244560ustar00rootroot00000000000000xdoctest.utils package ====================== Submodules ---------- .. toctree:: :maxdepth: 4 xdoctest.utils.util_deprecation xdoctest.utils.util_import xdoctest.utils.util_misc xdoctest.utils.util_mixins xdoctest.utils.util_notebook xdoctest.utils.util_path xdoctest.utils.util_str xdoctest.utils.util_stream Module contents --------------- .. automodule:: xdoctest.utils :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_deprecation.rst000066400000000000000000000003141505122333300300040ustar00rootroot00000000000000xdoctest.utils.util\_deprecation module ======================================= .. automodule:: xdoctest.utils.util_deprecation :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_import.rst000066400000000000000000000002751505122333300270270ustar00rootroot00000000000000xdoctest.utils.util\_import module ================================== .. automodule:: xdoctest.utils.util_import :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_misc.rst000066400000000000000000000002671505122333300264510ustar00rootroot00000000000000xdoctest.utils.util\_misc module ================================ .. automodule:: xdoctest.utils.util_misc :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_mixins.rst000066400000000000000000000002751505122333300270240ustar00rootroot00000000000000xdoctest.utils.util\_mixins module ================================== .. automodule:: xdoctest.utils.util_mixins :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_notebook.rst000066400000000000000000000003031505122333300273250ustar00rootroot00000000000000xdoctest.utils.util\_notebook module ==================================== .. automodule:: xdoctest.utils.util_notebook :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_path.rst000066400000000000000000000002671505122333300264520ustar00rootroot00000000000000xdoctest.utils.util\_path module ================================ .. automodule:: xdoctest.utils.util_path :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_str.rst000066400000000000000000000002641505122333300263230ustar00rootroot00000000000000xdoctest.utils.util\_str module =============================== .. automodule:: xdoctest.utils.util_str :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/auto/xdoctest.utils.util_stream.rst000066400000000000000000000002751505122333300270100ustar00rootroot00000000000000xdoctest.utils.util\_stream module ================================== .. automodule:: xdoctest.utils.util_stream :members: :undoc-members: :show-inheritance: :private-members: Erotemic-xdoctest-fac8308/docs/source/conf.py000066400000000000000000001060571505122333300212670ustar00rootroot00000000000000""" Notes: Based on template code in: ~/code/xcookie/xcookie/builders/docs.py ~/code/xcookie/xcookie/rc/conf_ext.py http://docs.readthedocs.io/en/latest/getting_started.html pip install sphinx sphinx-autobuild sphinx_rtd_theme sphinxcontrib-napoleon cd ~/code/xdoctest mkdir -p docs cd docs sphinx-quickstart # need to edit the conf.py # Remove any old auto docs folder and regenerate it. rm -rf ~/code/xdoctest/docs/source/auto cd ~/code/xdoctest/docs sphinx-apidoc --private --separate --force --output-dir ~/code/xdoctest/docs/source/auto ~/code/xdoctest/src/xdoctest '_tokenize.py' git add source/auto/*.rst # Note: the module should importable before running this # (e.g. install it in developer mode or munge the PYTHONPATH) make html Also: To turn on PR checks https://docs.readthedocs.io/en/stable/guides/autobuild-docs-for-pull-requests.html https://readthedocs.org/dashboard/xdoctest/advanced/ ensure your github account is connected to readthedocs https://readthedocs.org/accounts/social/connections/ ### For gitlab To enable the read-the-docs go to https://readthedocs.org/dashboard/ and login The user will need to enable the repo on their readthedocs account: https://readthedocs.org/dashboard/import/manual/? Enter the following information: Set the Repository NAME: xdoctest Set the Repository URL: https://github.com/Erotemic/xdoctest Make sure you have a .readthedocs.yml file For gitlab you also need to setup an integrations. Navigate to: https://readthedocs.org/dashboard/xdoctest/integrations/create/ Then add gitlab incoming webhook and copy the URL (make sure you copy the real url and not the text so https is included), specifically: In the "Integration type:" dropdown menu, select "Gitlab incoming webhook" Click "Add integration" Copy the text in the "Webhook URL" box to be used later. Copy the text in the "Secret" box to be used later. Then go to https://github.com/Erotemic/xdoctest/hooks Click "Add new webhook". Copy the text previously saved from the "Webhook URL" box in the readthedocs form into the "URL" box in the gitlab form. Copy the text previously saved from the "Secret" box in the readthedocs form into the "Secret token" box in the gitlab form. For trigger permissions select the following checkboxes: push events, tag push events, merge request events release events Click the "Add webhook" button. See Docs for more details https://docs.readthedocs.io/en/stable/integrations.html Will also need to activate the main branch: https://readthedocs.org/projects/xdoctest/versions/ """ # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- import sphinx_rtd_theme from os.path import exists from os.path import dirname from os.path import join def parse_version(fpath): """ Statically parse the version number from a python file """ import ast if not exists(fpath): raise ValueError('fpath={!r} does not exist'.format(fpath)) with open(fpath, 'r') as file_: sourcecode = file_.read() pt = ast.parse(sourcecode) class VersionVisitor(ast.NodeVisitor): def visit_Assign(self, node): for target in node.targets: if getattr(target, 'id', None) == '__version__': self.version = node.value.s visitor = VersionVisitor() visitor.visit(pt) return visitor.version project = 'xdoctest' copyright = '2025, Jon Crall' author = 'Jon Crall' modname = 'xdoctest' repo_dpath = dirname(dirname(dirname(__file__))) mod_dpath = join(repo_dpath, 'src/xdoctest') src_dpath = dirname(mod_dpath) modpath = join(mod_dpath, '__init__.py') release = parse_version(modpath) version = '.'.join(release.split('.')[0:2]) # Hack to ensure the module is importable # sys.path.insert(0, os.path.abspath(src_dpath)) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ # 'autoapi.extension', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'myst_parser', # For markdown docs 'sphinx.ext.imgconverter', # For building latexpdf 'sphinx.ext.githubpages', # 'sphinxcontrib.redirects', 'sphinxcontrib.jquery', # Fix for search 'sphinx_reredirects', ] todo_include_todos = True napoleon_google_docstring = True napoleon_use_param = False napoleon_use_ivar = True #autoapi_type = 'python' #autoapi_dirs = [mod_dpath] autodoc_inherit_docstrings = False # Hack for geowatch, todo configure autosummary_mock_imports = [ 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_24_and_lt_4_xx', 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_22_and_lt_4_24', 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_21_and_lt_4_22', 'geowatch.tasks.fusion.datamodules.temporal_sampling.affinity_sampling', 'geowatch.tasks.depth_pcd.model', 'geowatch.tasks.cold.export_change_map', ] autodoc_default_options = { # Document callable classes 'special-members': '__call__'} autodoc_member_order = 'bysource' autoclass_content = 'both' # autodoc_mock_imports = ['torch', 'torchvision', 'visdom'] # autoapi_modules = { # modname: { # 'override': False, # 'output': 'auto' # } # } # autoapi_dirs = [f'../../src/{modname}'] # autoapi_keep_files = True # References: # https://stackoverflow.com/questions/21538983/specifying-targets-for-intersphinx-links-to-numpy-scipy-and-matplotlib intersphinx_mapping = { # 'pytorch': ('http://pytorch.org/docs/master/', None), 'python': ('https://docs.python.org/3', None), 'click': ('https://click.palletsprojects.com/', None), # 'xxhash': ('https://pypi.org/project/xxhash/', None), # 'pygments': ('https://pygments.org/docs/', None), # 'tqdm': ('https://tqdm.github.io/', None), # Requires that the repo have objects.inv 'kwarray': ('https://kwarray.readthedocs.io/en/latest/', None), 'kwimage': ('https://kwimage.readthedocs.io/en/latest/', None), # 'kwplot': ('https://kwplot.readthedocs.io/en/latest/', None), 'ndsampler': ('https://ndsampler.readthedocs.io/en/latest/', None), 'ubelt': ('https://ubelt.readthedocs.io/en/latest/', None), 'xdoctest': ('https://xdoctest.readthedocs.io/en/latest/', None), 'networkx': ('https://networkx.org/documentation/stable/', None), 'scriptconfig': ('https://scriptconfig.readthedocs.io/en/latest/', None), 'rich': ('https://rich.readthedocs.io/en/latest/', None), 'numpy': ('https://numpy.org/doc/stable/', None), 'sympy': ('https://docs.sympy.org/latest/', None), 'scikit-learn': ('https://scikit-learn.org/stable/', None), 'pandas': ('https://pandas.pydata.org/docs/', None), 'matplotlib': ('https://matplotlib.org/stable/', None), 'pytest': ('https://docs.pytest.org/en/latest/', None), 'platformdirs': ('https://platformdirs.readthedocs.io/en/latest/', None), 'timerit': ('https://timerit.readthedocs.io/en/latest/', None), 'progiter': ('https://progiter.readthedocs.io/en/latest/', None), 'dateutil': ('https://dateutil.readthedocs.io/en/latest/', None), # 'pytest._pytest.doctest': ('https://docs.pytest.org/en/latest/_modules/_pytest/doctest.html', None), # 'colorama': ('https://pypi.org/project/colorama/', None), # 'cv2' : ('http://docs.opencv.org/2.4/', None), # 'h5py' : ('http://docs.h5py.org/en/latest/', None) } __dev_note__ = """ python -m sphinx.ext.intersphinx https://docs.python.org/3/objects.inv python -m sphinx.ext.intersphinx https://kwcoco.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv python -m sphinx.ext.intersphinx https://kwarray.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://kwimage.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://ubelt.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv sphobjinv suggest -t 90 -u https://readthedocs.org/projects/pytest/reference/objects.inv "signal.convolve2d" python -m sphinx.ext.intersphinx https://pygments-doc.readthedocs.io/en/latest/objects.inv """ # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'collapse_navigation': False, 'display_version': True, 'navigation_depth': -1, # 'logo_only': True, } # html_logo = '.static/xdoctest.svg' # html_favicon = '.static/xdoctest.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = project + 'doc' # -- Options for LaTeX output ------------------------------------------------ # References: # https://tex.stackexchange.com/questions/546246/centos-8-the-font-freeserif-cannot-be-found """ # https://www.sphinx-doc.org/en/master/usage/builders/index.html#sphinx.builders.latex.LaTeXBuilder # https://tex.stackexchange.com/a/570691/83399 sudo apt install fonts-freefont-otf texlive-luatex texlive-latex-extra texlive-fonts-recommended texlive-latex-recommended tex-gyre latexmk make latexpdf LATEXMKOPTS="-shell-escape --synctex=-1 -src-specials -interaction=nonstopmode" make latexpdf LATEXMKOPTS="-lualatex -interaction=nonstopmode" make LATEXMKOPTS="-lualatex -interaction=nonstopmode" """ # latex_engine = 'lualatex' # latex_engine = 'xelatex' latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'xdoctest.tex', 'xdoctest Documentation', 'Jon Crall', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'xdoctest', 'xdoctest Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'xdoctest', 'xdoctest Documentation', author, 'xdoctest', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- from sphinx.domains.python import PythonDomain # NOQA # from sphinx.application import Sphinx # NOQA from typing import Any, List # NOQA # HACK TO PREVENT EXCESSIVE TIME. # TODO: FIXME FOR REAL MAX_TIME_MINUTES = None if MAX_TIME_MINUTES: import ubelt # NOQA TIMER = ubelt.Timer() TIMER.tic() class PatchedPythonDomain(PythonDomain): """ References: https://github.com/sphinx-doc/sphinx/issues/3866 """ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): """ Helps to resolves cross-references """ if target.startswith('ub.'): target = 'ubelt.' + target[3] if target.startswith('xdoc.'): target = 'xdoctest.' + target[3] return_value = super(PatchedPythonDomain, self).resolve_xref( env, fromdocname, builder, typ, target, node, contnode) return return_value class GoogleStyleDocstringProcessor: """ A small extension that runs after napoleon and reformats erotemic-flavored google-style docstrings for sphinx. """ def __init__(self, autobuild=1): self.debug = 0 self.registry = {} if autobuild: self._register_builtins() def register_section(self, tag, alias=None): """ Decorator that adds a custom processing function for a non-standard google style tag. The decorated function should accept a list of docstring lines, where the first one will be the google-style tag that likely needs to be replaced, and then return the appropriate sphinx format (TODO what is the name? Is it just RST?). """ alias = [] if alias is None else alias alias = [alias] if not isinstance(alias, (list, tuple, set)) else alias alias.append(tag) alias = tuple(alias) # TODO: better tag patterns def _wrap(func): self.registry[tag] = { 'tag': tag, 'alias': alias, 'func': func, } return func return _wrap def _register_builtins(self): """ Adds definitions I like of CommandLine, TextArt, and Ignore """ @self.register_section(tag='CommandLine') def commandline(lines): new_lines = [] new_lines.append('.. rubric:: CommandLine') new_lines.append('') new_lines.append('.. code-block:: bash') new_lines.append('') new_lines.extend(lines[1:]) return new_lines @self.register_section(tag='SpecialExample', alias=['Benchmark', 'Sympy', 'Doctest']) def benchmark(lines): import textwrap new_lines = [] tag = lines[0].replace(':', '').strip() # new_lines.append(lines[0]) # TODO: it would be nice to change the tagline. # new_lines.append('') new_lines.append('.. rubric:: {}'.format(tag)) new_lines.append('') new_text = textwrap.dedent('\n'.join(lines[1:])) redone = new_text.split('\n') new_lines.extend(redone) # import ubelt as ub # print('new_lines = {}'.format(ub.urepr(new_lines, nl=1))) # new_lines.append('') return new_lines @self.register_section(tag='TextArt', alias=['Ascii']) def text_art(lines): new_lines = [] new_lines.append('.. rubric:: TextArt') new_lines.append('') new_lines.append('.. code-block:: bash') new_lines.append('') new_lines.extend(lines[1:]) return new_lines # @self.register_section(tag='TODO', alias=['.. todo::']) # def todo_section(lines): # """ # Fixup todo sections # """ # import xdev # xdev.embed() # import ubelt as ub # print('lines = {}'.format(ub.urepr(lines, nl=1))) # return new_lines @self.register_section(tag='Ignore') def ignore(lines): return [] def process(self, lines): """ Example: >>> import ubelt as ub >>> self = GoogleStyleDocstringProcessor() >>> lines = ['Hello world', >>> '', >>> 'CommandLine:', >>> ' hi', >>> '', >>> 'CommandLine:', >>> '', >>> ' bye', >>> '', >>> 'TextArt:', >>> '', >>> ' 1', >>> ' 2', >>> '', >>> ' 345', >>> '', >>> 'Foobar:', >>> '', >>> 'TextArt:'] >>> new_lines = self.process(lines[:]) >>> print(chr(10).join(new_lines)) """ orig_lines = lines[:] new_lines = [] curr_mode = '__doc__' accum = [] def accept(): """ called when we finish reading a section """ if curr_mode == '__doc__': # Keep the lines as-is new_lines.extend(accum) else: # Process this section with the given function regitem = self.registry[curr_mode] func = regitem['func'] fixed = func(accum) new_lines.extend(fixed) # Reset the accumulator for the next section accum[:] = [] for line in orig_lines: found = None for regitem in self.registry.values(): if line.startswith(regitem['alias']): found = regitem['tag'] break if not found and line and not line.startswith(' '): # if the line startswith anything but a space, we are no longer # in the previous nested scope. NOTE: This assumption may not # be general, but it works for my code. found = '__doc__' if found: # New section is found, accept the previous one and start # accumulating the new one. accept() curr_mode = found accum.append(line) # Finalize the last section accept() lines[:] = new_lines # make sure there is a blank line at the end if lines and lines[-1]: lines.append('') return lines def process_docstring_callback(self, app, what_: str, name: str, obj: Any, options: Any, lines: List[str]) -> None: """ Callback to be registered to autodoc-process-docstring Custom process to transform docstring lines Remove "Ignore" blocks Args: app (sphinx.application.Sphinx): the Sphinx application object what (str): the type of the object which the docstring belongs to (one of "module", "class", "exception", "function", "method", "attribute") name (str): the fully qualified name of the object obj: the object itself options: the options given to the directive: an object with attributes inherited_members, undoc_members, show_inheritance and noindex that are true if the flag option of same name was given to the auto directive lines (List[str]): the lines of the docstring, see above References: https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html """ if self.debug: print(f'ProcessDocstring: name={name}, what_={what_}, num_lines={len(lines)}') # print('BEFORE:') # import ubelt as ub # print('lines = {}'.format(ub.urepr(lines, nl=1))) self.process(lines) # docstr = '\n'.join(lines) # if 'Convert the Mask' in docstr: # import xdev # xdev.embed() # if 'keys in this dictionary ' in docstr: # import xdev # xdev.embed() render_doc_images = 0 if MAX_TIME_MINUTES and TIMER.toc() > (60 * MAX_TIME_MINUTES): render_doc_images = False # FIXME too slow on RTD if render_doc_images: # DEVELOPING if any('REQUIRES(--show)' in line for line in lines): # import xdev # xdev.embed() create_doctest_figure(app, obj, name, lines) FIX_EXAMPLE_FORMATTING = 1 if FIX_EXAMPLE_FORMATTING: for idx, line in enumerate(lines): if line == "Example:": lines[idx] = "**Example:**" lines.insert(idx + 1, "") REFORMAT_SECTIONS = 0 if REFORMAT_SECTIONS: REFORMAT_RETURNS = 0 REFORMAT_PARAMS = 0 docstr = SphinxDocstring(lines) if REFORMAT_PARAMS: for found in docstr.find_tagged_lines('Parameters'): print(found['text']) edit_slice = found['edit_slice'] # TODO: figure out how to do this. # # file = 'foo.rst' # import rstparse # rst = rstparse.Parser() # import io # rst.read(io.StringIO(found['text'])) # rst.parse() # for line in rst.lines: # print(line) # # found['text'] # import docutils # settings = docutils.frontend.OptionParser( # components=(docutils.parsers.rst.Parser,) # ).get_default_values() # document = docutils.utils.new_document('', settings) # from docutils.parsers import rst # rst.Parser().parse(found['text'], document) if REFORMAT_RETURNS: for found in docstr.find_tagged_lines('returns'): # FIXME: account for new slice with -2 offset edit_slice = found['edit_slice'] text = found['text'] new_lines = [] for para in text.split('\n\n'): indent = para[:len(para) - len(para.lstrip())] new_paragraph = indent + paragraph(para) new_lines.append(new_paragraph) new_lines.append('') new_lines = new_lines[:-1] lines[edit_slice] = new_lines # print('AFTER:') # print('lines = {}'.format(ub.urepr(lines, nl=1))) # if name == 'kwimage.Affine.translate': # import sys # sys.exit(1) class SphinxDocstring: """ Helper to parse and modify sphinx docstrings """ def __init__(docstr, lines): docstr.lines = lines # FORMAT THE RETURNS SECTION A BIT NICER import re tag_pat = re.compile(r'^:(\w*):') directive_pat = re.compile(r'^.. (\w*)::\s*(\w*)') # Split by sphinx types, mark the line offset where they start / stop sphinx_parts = [] for idx, line in enumerate(lines): tag_match = tag_pat.search(line) directive_match = directive_pat.search(line) if tag_match: tag = tag_match.groups()[0] sphinx_parts.append({ 'tag': tag, 'start_offset': idx, 'type': 'tag', }) elif directive_match: tag = directive_match.groups()[0] sphinx_parts.append({ 'tag': tag, 'start_offset': idx, 'type': 'directive', }) prev_offset = len(lines) for part in sphinx_parts[::-1]: part['end_offset'] = prev_offset prev_offset = part['start_offset'] docstr.sphinx_parts = sphinx_parts if 0: for line in lines: print(line) def find_tagged_lines(docstr, tag): for part in docstr.sphinx_parts[::-1]: if part['tag'] == tag: edit_slice = slice(part['start_offset'], part['end_offset']) return_section = docstr.lines[edit_slice] text = '\n'.join(return_section) found = { 'edit_slice': edit_slice, 'text': text, } yield found def paragraph(text): r""" Wraps multi-line strings and restructures the text to remove all newlines, heading, trailing, and double spaces. Useful for writing log messages Args: text (str): typically a multiline string Returns: str: the reduced text block """ import re out = re.sub(r'\s\s*', ' ', text).strip() return out def create_doctest_figure(app, obj, name, lines): """ The idea is that each doctest that produces a figure should generate that and then that figure should be part of the docs. """ import xdoctest import sys import types if isinstance(obj, types.ModuleType): module = obj else: module = sys.modules[obj.__module__] # TODO: read settings from pyproject.toml? if '--show' not in sys.argv: sys.argv.append('--show') if '--nointeract' not in sys.argv: sys.argv.append('--nointeract') modpath = module.__file__ # print(doctest.format_src()) import pathlib # HACK: write to the srcdir doc_outdir = pathlib.Path(app.outdir) doc_srcdir = pathlib.Path(app.srcdir) doc_static_outdir = doc_outdir / '_static' doc_static_srcdir = doc_srcdir / '_static' src_fig_dpath = (doc_static_srcdir / 'images') src_fig_dpath.mkdir(exist_ok=True, parents=True) out_fig_dpath = (doc_static_outdir / 'images') out_fig_dpath.mkdir(exist_ok=True, parents=True) # fig_dpath = (doc_outdir / 'autofigs' / name).mkdir(exist_ok=True) fig_num = 1 import kwplot kwplot.autompl(force='agg') plt = kwplot.autoplt() docstr = '\n'.join(lines) # TODO: The freeform parser does not work correctly here. # We need to parse out the sphinx (epdoc)? individual examples # so we can get different figures. But we can hack it for now. import re split_parts = re.split('({}\\s*\n)'.format(re.escape('.. rubric:: Example')), docstr) # split_parts = docstr.split('.. rubric:: Example') # import xdev # xdev.embed() def doctest_line_offsets(doctest): # Where the doctests starts and ends relative to the file start_line_offset = doctest.lineno - 1 last_part = doctest._parts[-1] last_line_offset = start_line_offset + last_part.line_offset + last_part.n_lines - 1 offsets = { 'start': start_line_offset, 'end': last_line_offset, 'stop': last_line_offset + 1, } return offsets # from xdoctest import utils # part_lines = utils.add_line_numbers(docstr.split('\n'), n_digits=3, start=0) # print('\n'.join(part_lines)) to_insert_fpaths = [] curr_line_offset = 0 for part in split_parts: num_lines = part.count('\n') doctests = list(xdoctest.core.parse_docstr_examples( part, modpath=modpath, callname=name, # style='google' )) # print(doctests) # doctests = list(xdoctest.core.parse_docstr_examples( # docstr, modpath=modpath, callname=name)) for doctest in doctests: if '--show' in part: ... # print('-- SHOW TEST---')/) # kwplot.close_figures() try: import pytest # NOQA except ImportError: pass try: from xdoctest.exceptions import Skipped except ImportError: # nocover # Define dummy skipped exception if pytest is not available class Skipped(Exception): pass try: doctest.mode = 'native' doctest.run(verbose=0, on_error='raise') ... except Skipped: print(f'Skip doctest={doctest}') except Exception as ex: print(f'ex={ex}') print(f'Error in doctest={doctest}') offsets = doctest_line_offsets(doctest) doctest_line_end = curr_line_offset + offsets['stop'] insert_line_index = doctest_line_end figures = kwplot.all_figures() for fig in figures: fig_num += 1 # path_name = path_sanatize(name) path_name = (name).replace('.', '_') fig_fpath = src_fig_dpath / f'fig_{path_name}_{fig_num:03d}.jpeg' fig.savefig(fig_fpath) print(f'Wrote figure: {fig_fpath}') to_insert_fpaths.append({ 'insert_line_index': insert_line_index, 'fpath': fig_fpath, }) for fig in figures: plt.close(fig) # kwplot.close_figures(figures) curr_line_offset += (num_lines) # if len(doctests) > 1: # doctests # import xdev # xdev.embed() INSERT_AT = 'end' INSERT_AT = 'inline' end_index = len(lines) # Reverse order for inserts import shutil for info in to_insert_fpaths[::-1]: src_abs_fpath = info['fpath'] rel_to_static_fpath = src_abs_fpath.relative_to(doc_static_srcdir) # dst_abs_fpath = doc_static_outdir / rel_to_static_fpath # dst_abs_fpath.parent.mkdir(parents=True, exist_ok=True) rel_to_root_fpath = src_abs_fpath.relative_to(doc_srcdir) dst_abs_fpath1 = doc_outdir / rel_to_root_fpath dst_abs_fpath1.parent.mkdir(parents=True, exist_ok=True) shutil.copy(src_abs_fpath, dst_abs_fpath1) dst_abs_fpath2 = doc_outdir / rel_to_static_fpath dst_abs_fpath2.parent.mkdir(parents=True, exist_ok=True) shutil.copy(src_abs_fpath, dst_abs_fpath2) dst_abs_fpath3 = doc_srcdir / rel_to_static_fpath dst_abs_fpath3.parent.mkdir(parents=True, exist_ok=True) shutil.copy(src_abs_fpath, dst_abs_fpath3) if INSERT_AT == 'inline': # Try to insert after test insert_index = info['insert_line_index'] elif INSERT_AT == 'end': insert_index = end_index else: raise KeyError(INSERT_AT) lines.insert(insert_index, '.. image:: {}'.format('..' / rel_to_root_fpath)) # lines.insert(insert_index, '.. image:: {}'.format(rel_to_root_fpath)) # lines.insert(insert_index, '.. image:: {}'.format(rel_to_static_fpath)) lines.insert(insert_index, '') def postprocess_hyperlinks(app, doctree, docname): """ Extension to fixup hyperlinks. This should be connected to the Sphinx application's "autodoc-process-docstring" event. """ # Your hyperlink postprocessing logic here from docutils import nodes import pathlib for node in doctree.traverse(nodes.reference): if 'refuri' in node.attributes: refuri = node.attributes['refuri'] if '.rst' in refuri: if 'source' in node.document: fpath = pathlib.Path(node.document['source']) parent_dpath = fpath.parent if (parent_dpath / refuri).exists(): node.attributes['refuri'] = refuri.replace('.rst', '.html') else: raise AssertionError def fix_rst_todo_section(lines): new_lines = [] for line in lines: ... ... def setup(app): import sphinx app : sphinx.application.Sphinx = app app.add_domain(PatchedPythonDomain, override=True) app.connect("doctree-resolved", postprocess_hyperlinks) docstring_processor = GoogleStyleDocstringProcessor() # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings app.connect('autodoc-process-docstring', docstring_processor.process_docstring_callback) def copy(src, dst): import shutil print(f'Copy {src} -> {dst}') assert src.exists() if not dst.parent.exists(): dst.parent.mkdir() shutil.copy(src, dst) ### Hack for kwcoco: TODO: figure out a way for the user to configure this. HACK_FOR_KWCOCO = 0 if HACK_FOR_KWCOCO: import pathlib doc_outdir = pathlib.Path(app.outdir) / 'auto' doc_srcdir = pathlib.Path(app.srcdir) / 'auto' mod_dpath = doc_srcdir / '../../../kwcoco' src_fpath = (mod_dpath / 'coco_schema.json') copy(src_fpath, doc_outdir / src_fpath.name) copy(src_fpath, doc_srcdir / src_fpath.name) src_fpath = (mod_dpath / 'coco_schema_informal.rst') copy(src_fpath, doc_outdir / src_fpath.name) copy(src_fpath, doc_srcdir / src_fpath.name) return app Erotemic-xdoctest-fac8308/docs/source/index.rst000066400000000000000000000005621505122333300216230ustar00rootroot00000000000000:github_url: https://github.com/Erotemic/xdoctest .. The __init__ files contains the top-level documentation overview .. automodule:: xdoctest.__init__ :show-inheritance: .. toctree:: :maxdepth: 8 Package layout manual/xdoc_with_jupyter manual/async_doctest Indices and tables ================== * :ref:`genindex` * :ref:`modindex` Erotemic-xdoctest-fac8308/docs/source/manual/000077500000000000000000000000001505122333300212345ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/docs/source/manual/async_doctest.rst000066400000000000000000000110751505122333300246340ustar00rootroot00000000000000Doctests with async code ------------------------ Python 3.5 introduced `async` functions. These are functions that run within an "event loop" that is not blocked if those functions perform blocking IO. It's similar to writing multi-threaded code but with the advantage of not having to worry about thread safety. For more information see `the python docs `__. Asynchronous python code examples using `asyncio `__ are supported at the top level by xdoctest. This means that your code examples do not have to wrap every small snippet in a function and call :func:`asyncio.run`. xdoctest handles that for you keeping the examples simple and easy to follow. For example **without xdoctest** your code example would have to be written like this: .. code:: python >>> import yourlibrary >>> import asyncio >>> async def connect_and_get_running_info_wrapper(): ... server = await yourlibrary.connect_to_server("example_server_url") ... running_info = await server.get_running_info() ... return server, running_info ... >>> server, running_info = asyncio.run(connect_and_get_running_info_wrapper()) >>> running_info.restarted_at 01:00 >>> async def restart_and_get_running_info_wrapper(server): ... await server.restart() ... return await server.get_running_info() ... >>> running_info = asyncio.run(restart_and_get_running_info_wrapper(server)) >>> running_info.restarted_at 13:15 Now **with xdoctest** this can now be written like this: .. code:: python >>> import yourlibrary >>> server = await yourlibrary.connect_to_server("example_server_url") >>> running_info = await server.get_running_info() >>> running_info.restarted_at 01:00 >>> await server.restart() >>> running_info = await server.get_running_info() >>> running_info.restarted_at 13:15 The improvement in brevity is obvious but even more so if you are writing longer examples where you want to maintain and reuse variables between each test output step. .. note:: If you don't want to utilise this feature for your async examples you don't have to. Just don't write code examples with top level awaits. .. caution:: Each code block with top level awaits runs in its own :func:`asyncio.run`. This means that all tasks created in such a block will be cancelled when it finishes. If you want more asyncio REPL-like behavior, see the next section. ``ASYNC`` Directive =================== By default, xdoctest separates code blocks with top level awaits and blocks without. The former will have a running asyncio event loop, but the latter will not. This can be undesirable when you need to multitask or use the same event loop for all blocks. .. code:: python >>> import yourlibrary >>> import asyncio >>> task = asyncio.create_task(yourlibrary.send_message()) # fails! >>> # ...do something else... >>> result = await task # never be reached To solve this problem, since 1.3.0 xdoctest has a new basic directive, ``ASYNC``. Just enable the directive at the beginning of your code example and you will get the asyncio REPL behavior. .. code:: python >>> # xdoctest: +ASYNC >>> import yourlibrary >>> import asyncio >>> task = asyncio.create_task(yourlibrary.send_message()) # ok >>> # ...do something else... >>> result = await task # will be reached Of course, you can use the directive to cover certain places in your code too. With this you can demonstrate the behavior of your functions both inside and outside :func:`asyncio.run` in a single example. .. code:: python >>> import yourlibrary >>> yourlibrary.in_async_context() # xdoctest: +ASYNC True >>> yourlibrary.in_async_context() # xdoctest: -ASYNC False You may also find it convenient to enable the directive for all tests by default, in order to avoid boilerplate. For this, as with any other basic directive, you can use ``--options ASYNC`` for the native interface and ``--xdoctest-options ASYNC`` for the pytest interface. Caveats ======= * Consumers reading your documentation may not be familiar with async concepts. It could be helpful to mention in your docs that the code examples should be run in an event loop or in a REPL that supports top-level :keyword:`await`. (IPython supports this by default. For the standard Python REPL, use ``python -m asyncio``.) * Using top level awaits in tests that are already running in an event loop is not supported. * Only python's native asyncio library is supported for top level awaits. Erotemic-xdoctest-fac8308/docs/source/manual/installing_python.rst000066400000000000000000000053131505122333300255350ustar00rootroot00000000000000Installing Python ================= Before you can use xdoctest, you must have Python installed. Its also best practice to be in a `virtual environment `_. If you are a Python beginner, then I would recommend setting up a `conda `_ environment. On Linux, I typically use this end-to-end script for installing conda, creating, and activating a virtual environment. .. code-block:: bash # Download the conda install script into a temporary directory mkdir -p ~/tmp cd ~/tmp # To update to a newer version see: # https://docs.conda.io/en/latest/miniconda_hashes.html for updating CONDA_INSTALL_SCRIPT=Miniconda3-py38_4.9.2-Linux-x86_64.sh CONDA_EXPECTED_SHA256=1314b90489f154602fd794accfc90446111514a5a72fe1f71ab83e07de9504a7 curl https://repo.anaconda.com/miniconda/$CONDA_INSTALL_SCRIPT > $CONDA_INSTALL_SCRIPT CONDA_GOT_SHA256=$(sha256sum $CONDA_INSTALL_SCRIPT | cut -d' ' -f1) # For security, it is important to verify the hash if [[ "$CONDA_GOT_SHA256" != "$CONDA_EXPECTED_SHA256_HASH" ]]; then echo "Downloaded file does not match hash! DO NOT CONTINUE!" exit 1; fi chmod +x $CONDA_INSTALL_SCRIPT # Install miniconda to user local directory _CONDA_ROOT=$HOME/.local/conda sh $CONDA_INSTALL_SCRIPT -b -p $_CONDA_ROOT # Activate the basic conda environment source $_CONDA_ROOT/etc/profile.d/conda.sh # Update the base and create a virtual environment named py38 conda update --name base conda --yes conda create --name py38 python=3.8 --yes # Activate your virtualenv # I recommend doing something similar in your ~/.bashrc source $_CONDA_ROOT/etc/profile.d/conda.sh conda activate py38 Once you have created this conda environment, I recommend adding the following lines to your ``.bashrc``. This way you will automatically activate your virtual environment whenever you start a new bash shell. .. code-block:: bash # Enables the conda command _CONDA_ROOT=$HOME/.local/conda source $_CONDA_ROOT/etc/profile.d/conda.sh if [ -d "$HOME/.local/conda/envs/py38" ]; then # Always start in a virtual environment conda activate py38 fi For other operating systems, see the official documentation to install conda `on Windows `_ or `on MacOS `_. Once conda is installed the commands for `managing conda virtual environments `_ are roughly the same across platforms. Erotemic-xdoctest-fac8308/docs/source/manual/xdoc_with_jupyter.rst000066400000000000000000000046331505122333300255460ustar00rootroot00000000000000Running Doctests in Jupyter Notebooks ------------------------------------- You can run doctests within a Jupyter notebook in two ways: Method 1 - Inside the notebook ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Either insert this cell into your notebook: .. code:: python if __name__ == '__main__': import xdoctest xdoctest.doctest_module() This will execute any doctests for callables that are in the top-level namespace of the notebook. While you don’t have to include the ``if __name__`` block, it is better practice because it will prevent issues if you also wish to use “Method 2”. Method 2 - Outside the notebook ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An alternative way to run would be using the xdoctest command line tool and pointing to the notebook file. .. code:: bash xdoctest path/to/notebook.ipynb This will execute *every* cell in the notebook and then execute the doctest of any defined callable with a doctest. Caveats ~~~~~~~ WARNING: in both of the above methods, when you execute doctests it will include any function / class that was defined in the notebook, but also *any external library callable with a doctest that you import directly*! Therefore it is best to (1) never use ``from import *`` statements (in general using ``import *`` is bad practice) and (2) prefer using functions via their module name rather than importing directory. For example instead of ``from numpy import array; x = array([1])`` use ``import numpy as np; x = np.array([1])``. Lastly, it is important to note that Jupyter notebooks are great for prototyping and exploration, but in practice storing algorithm and utilities in Jupyter notebooks is not sustainable (`for some of these reasons`_). Reusable code should eventually be refactored into a `proper pip-installable Python package`_ where the top level directory contains a ``setup.py`` and a folder with a name corresponding to the module name and containing an ``__init__.py`` file and any other package python files. However, if you write you original Jupyter code with doctests, then when you port your code to a proper package the automated tests come with it! (And the above warning does *not* apply to statically parsed python packages) .. _for some of these reasons: https://github.com/grst/nbimporter#update-2019-06-i-do-not-recommend-any-more-to-use-nbimporter .. _proper pip-installable Python package: https://packaging.python.org/tutorials/packaging-projects/ Erotemic-xdoctest-fac8308/docs/source/redirects000066400000000000000000000000351505122333300216640ustar00rootroot00000000000000index.rst xdoctest/index.rst Erotemic-xdoctest-fac8308/docs/todo.rst000066400000000000000000000053301505122333300201570ustar00rootroot00000000000000Tasks: ------ This module is in a working state. It is nearly complete, but there are a few todo items: Extraction: ^^^^^^^^^^^ - [x] Parse freeform-style doctest examples (builtin-doctest default) - [x] Parse google-style doctest examples explicitly - [ ] Parse numpy-style doctest examples explicitly Parsing: ^^^^^^^^ - [X] Removed all known syntax backwards incompatibility. - [ ] Removed all known directive backwards incompatibility. Checking: ^^^^^^^^^ - [x] Support got/want testing with stdout. - [x] Support got/want testing with evaluated statements. - [x] Support got/want testing with ``NORMALIZED_WHITESPACE`` and ``ELLIPSES`` by default - [x] Support toggling got/want directives for backwards compatibility? - [x] Support got/want testing with exceptions. Reporting: ^^^^^^^^^^ - [x] Optional colored output - [x] Support advanced got/want reporting directive for backwards compatibility (e.g udiff, ndiff) Running: ^^^^^^^^ - [x] Standalone ``doctest_module`` entry point. - [x] Plugin based ``pytest`` entry point. - [x] Defaults to static parsing doctests - [x] Ability to dynamically parse doctests - [x] Can run tests in extension modules - [x] Add dynamic parsing to pytest plugin Directives ~~~~~~~~~~ - [x] multi-line directives (new feature, not in doctest) - [x] ``# doctest: +SKIP`` inline directive - [x] ``# doctest: +SKIP`` global directive - [x] ``# doctest: -NORMALIZED_WHITESPACE`` inline directive - [x] ``# doctest: -ELLIPSES`` inline directive - [x] ``# doctest: +REPORT_NDIFF`` inline directive - [x] ``# doctest: +REPORT_UDIFF`` inline directive Testing: ^^^^^^^^ - [x] Tests of core module components - [x] Register on pypi - [x] CI-via Travis - [x] CI-via AppVeyor - [x] Coverage - [x] Add a small pybind11 extension module that demonstrates how tests can be defined and run in extension modules - [ ] 95% or better coverage (note reported coverage is artificially small due to issues with coverage of pytest plugins) Documentation: ^^^^^^^^^^^^^^ - [x] Basic docstring docs - [x] Basic readme - [x] Improve readme - [X] Further improve readme - [X] Auto-generate read-the-docs Documentation - [X] Getting Started documentation in read-the-docs Uncategorized: ^^^^^^^^^^^^^^ - [x] Make a new default mode: auto, which first tries google-style, and then fallback to freeform mode if no doctests are found or if an error occurs. (new in 0.4.0) - [x] multi-part got / want "delayed" matching (new in 0.4.0). - [x] fix the highlighting of the "got" string when dumping test results (new in 0.4.0) - [ ] Write a plugin to sphinx so it uses xdoctest instead of doctest? - [ ] Attempt to get pytorch branch merged: https://github.com/pytorch/pytorch/pull/15648 Erotemic-xdoctest-fac8308/publish.sh000077500000000000000000000370351505122333300175440ustar00rootroot00000000000000#!/usr/bin/env bash __doc__=' Script to publish a new version of this library on PyPI. If your script has binary dependencies then we assume that you have built a proper binary wheel with auditwheel and it exists in the wheelhouse directory. Otherwise, for source tarballs and wheels this script runs the setup.py script to create the wheels as well. Running this script with the default arguments will perform any builds and gpg signing, but nothing will be uploaded to pypi unless the user explicitly sets DO_UPLOAD=True or answers yes to the prompts. Args: TWINE_USERNAME (str) : username for pypi. This must be set if uploading to pypi. Defaults to "". TWINE_PASSWORD (str) : password for pypi. This must be set if uploading to pypi. Defaults to "". DO_GPG (bool) : If True, sign the packages with a GPG key specified by `GPG_KEYID`. defaults to auto. DO_OTS (bool) : If True, make an opentimestamp for the package and signature (if available) DO_UPLOAD (bool) : If True, upload the packages to the pypi server specified by `TWINE_REPOSITORY_URL`. DO_BUILD (bool) : If True, will execute the setup.py build script, which is expected to use setuptools. In the future we may add support for other build systems. If False, this script will expect the pre-built packages to exist in "wheelhouse/{NAME}-{VERSION}-{SUFFIX}.{EXT}". Defaults to "auto". DO_TAG (bool) : if True, will "git tag" the current HEAD with TWINE_REPOSITORY_URL (url) : The URL of the pypi server to upload to. Defaults to "auto", which if on the release branch, this will default to the live pypi server `https://upload.pypi.org/legacy` otherwise this will default to the test.pypi server: `https://test.pypi.org/legacy` GPG_KEYID (str) : The keyid of the gpg key to sign with. (if DO_GPG=True). Defaults to the local git config user.signingkey DEPLOY_REMOTE (str) : The git remote to push any tags to. Defaults to "origin" GPG_EXECUTABLE (path) : Path to the GPG executable. Defaults to "auto", which chooses "gpg2" if it exists, otherwise "gpg". MODE (str): Can be pure, binary, or all. Defaults to pure unless a CMakeLists.txt exists in which case it defaults to binary. Requirements: twine >= 1.13.0 gpg2 >= 2.2.4 OpenSSL >= 1.1.1c Notes: # NEW API TO UPLOAD TO PYPI # https://docs.travis-ci.com/user/deployment/pypi/ # https://packaging.python.org/tutorials/distributing-packages/ # https://stackoverflow.com/questions/45188811/how-to-gpg-sign-a-file-that-is-built-by-travis-ci Based on template in # github.com/Erotemic/xcookie/ ~/code/xcookie/publish.sh Usage: load_secrets # TODO: set a trap to unload secrets? cd # Set your variables or load your secrets export TWINE_USERNAME= export TWINE_PASSWORD= TWINE_REPOSITORY_URL="https://test.pypi.org/legacy/" ' DEBUG=${DEBUG:=''} if [[ "${DEBUG}" != "" ]]; then set -x fi check_variable(){ KEY=$1 HIDE=$2 VAL=${!KEY} if [[ "$HIDE" == "" ]]; then echo "[DEBUG] CHECK VARIABLE: $KEY=\"$VAL\"" else echo "[DEBUG] CHECK VARIABLE: $KEY=" fi if [[ "$VAL" == "" ]]; then echo "[ERROR] UNSET VARIABLE: $KEY=\"$VAL\"" exit 1; fi } normalize_boolean(){ ARG=$1 ARG=$(echo "$ARG" | awk '{print tolower($0)}') if [ "$ARG" = "true" ] || [ "$ARG" = "1" ] || [ "$ARG" = "yes" ] || [ "$ARG" = "y" ] || [ "$ARG" = "on" ]; then echo "True" elif [ "$ARG" = "false" ] || [ "$ARG" = "0" ] || [ "$ARG" = "no" ] || [ "$ARG" = "n" ] || [ "$ARG" = "off" ]; then echo "False" else echo "$ARG" fi } #### # Parameters ### # Options DEPLOY_REMOTE=${DEPLOY_REMOTE:=origin} NAME=${NAME:=$(python -c "import setup; print(setup.NAME)")} VERSION=$(python -c "import setup; print(setup.VERSION)") check_variable DEPLOY_REMOTE ARG_1=$1 DO_UPLOAD=${DO_UPLOAD:=$ARG_1} DO_TAG=${DO_TAG:=$ARG_1} DO_GPG=${DO_GPG:="auto"} if [ "$DO_GPG" == "auto" ]; then DO_GPG="True" fi DO_OTS=${DO_OTS:="auto"} if [ "$DO_OTS" == "auto" ]; then # Do opentimestamp if it is available # python -m pip install opentimestamps-client if type ots ; then DO_OTS="True" else DO_OTS="False" fi fi DO_BUILD=${DO_BUILD:="auto"} # Verify that we want to build if [ "$DO_BUILD" == "auto" ]; then DO_BUILD="True" fi DO_GPG=$(normalize_boolean "$DO_GPG") DO_OTS=$(normalize_boolean "$DO_OTS") DO_BUILD=$(normalize_boolean "$DO_BUILD") DO_UPLOAD=$(normalize_boolean "$DO_UPLOAD") DO_TAG=$(normalize_boolean "$DO_TAG") TWINE_USERNAME=${TWINE_USERNAME:=""} TWINE_PASSWORD=${TWINE_PASSWORD:=""} DEFAULT_TEST_TWINE_REPO_URL="https://test.pypi.org/legacy/" DEFAULT_LIVE_TWINE_REPO_URL="https://upload.pypi.org/legacy/" TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="auto"} if [[ "${TWINE_REPOSITORY_URL}" == "auto" ]]; then #if [[ "$(cat .git/HEAD)" != "ref: refs/heads/release" ]]; then # # If we are not on release, then default to the test pypi upload repo # TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="https://test.pypi.org/legacy/"} #else if [[ "$DEBUG" == "" ]]; then TWINE_REPOSITORY_URL="live" else TWINE_REPOSITORY_URL="test" fi fi if [[ "${TWINE_REPOSITORY_URL}" == "live" ]]; then TWINE_REPOSITORY_URL=$DEFAULT_LIVE_TWINE_REPO_URL elif [[ "${TWINE_REPOSITORY_URL}" == "test" ]]; then TWINE_REPOSITORY_URL=$DEFAULT_TEST_TWINE_REPO_URL fi GPG_EXECUTABLE=${GPG_EXECUTABLE:="auto"} if [[ "$GPG_EXECUTABLE" == "auto" ]]; then if [[ "$(which gpg2)" != "" ]]; then GPG_EXECUTABLE="gpg2" else GPG_EXECUTABLE="gpg" fi fi GPG_KEYID=${GPG_KEYID:="auto"} if [[ "$GPG_KEYID" == "auto" ]]; then GPG_KEYID=$(git config --local user.signingkey) if [[ "$GPG_KEYID" == "" ]]; then GPG_KEYID=$(git config --global user.signingkey) fi fi if [ -f CMakeLists.txt ] ; then DEFAULT_MODE="binary" else DEFAULT_MODE="pure" fi # TODO: parameterize # The default should change depending on the application MODE=${MODE:=$DEFAULT_MODE} if [[ "$MODE" == "all" ]]; then MODE_LIST=("sdist" "native" "bdist") elif [[ "$MODE" == "pure" ]]; then MODE_LIST=("sdist" "native") elif [[ "$MODE" == "binary" ]]; then MODE_LIST=("sdist" "bdist") else MODE_LIST=("$MODE") fi MODE_LIST_STR=$(printf '"%s" ' "${MODE_LIST[@]}") #echo "MODE_LIST_STR = $MODE_LIST_STR" #### # Logic ### WAS_INTERACTION="False" echo " === PYPI BUILDING SCRIPT == NAME='$NAME' VERSION='$VERSION' TWINE_USERNAME='$TWINE_USERNAME' TWINE_REPOSITORY_URL = $TWINE_REPOSITORY_URL GPG_KEYID = '$GPG_KEYID' DO_UPLOAD=${DO_UPLOAD} DO_TAG=${DO_TAG} DO_GPG=${DO_GPG} DO_OTS=${DO_OTS} DO_BUILD=${DO_BUILD} MODE_LIST_STR=${MODE_LIST_STR} " # Verify that we want to tag if [[ "$DO_TAG" == "True" ]]; then echo "About to tag VERSION='$VERSION'" else if [[ "$DO_TAG" == "False" ]]; then echo "We are NOT about to tag VERSION='$VERSION'" else # shellcheck disable=SC2162 read -p "Do you want to git tag and push version='$VERSION'? (input 'yes' to confirm)" ANS echo "ANS = $ANS" WAS_INTERACTION="True" DO_TAG="$ANS" DO_TAG=$(normalize_boolean "$DO_TAG") if [ "$DO_BUILD" == "auto" ]; then DO_BUILD="" DO_GPG="" fi fi fi if [[ "$DO_BUILD" == "True" ]]; then echo "About to build wheels" else if [[ "$DO_BUILD" == "False" ]]; then echo "We are NOT about to build wheels" else # shellcheck disable=SC2162 read -p "Do you need to build wheels? (input 'yes' to confirm)" ANS echo "ANS = $ANS" WAS_INTERACTION="True" DO_BUILD="$ANS" DO_BUILD=$(normalize_boolean "$DO_BUILD") fi fi # Verify that we want to publish if [[ "$DO_UPLOAD" == "True" ]]; then echo "About to directly publish VERSION='$VERSION'" else if [[ "$DO_UPLOAD" == "False" ]]; then echo "We are NOT about to directly publish VERSION='$VERSION'" else # shellcheck disable=SC2162 read -p "Are you ready to directly publish version='$VERSION'? ('yes' will twine upload)" ANS echo "ANS = $ANS" WAS_INTERACTION="True" DO_UPLOAD="$ANS" DO_UPLOAD=$(normalize_boolean "$DO_UPLOAD") fi fi if [[ "$WAS_INTERACTION" == "True" ]]; then echo " === PYPI BUILDING SCRIPT == VERSION='$VERSION' TWINE_USERNAME='$TWINE_USERNAME' TWINE_REPOSITORY_URL = $TWINE_REPOSITORY_URL GPG_KEYID = '$GPG_KEYID' DO_UPLOAD=${DO_UPLOAD} DO_TAG=${DO_TAG} DO_GPG=${DO_GPG} DO_BUILD=${DO_BUILD} MODE_LIST_STR='${MODE_LIST_STR}' " # shellcheck disable=SC2162 read -p "Look good? Ready to build? Enter any text to continue" ANS fi if [ "$DO_BUILD" == "True" ]; then echo " === === " echo "LIVE BUILDING" # Build wheel and source distribution for _MODE in "${MODE_LIST[@]}" do echo "_MODE = $_MODE" if [[ "$_MODE" == "sdist" ]]; then python setup.py sdist || { echo 'failed to build sdist wheel' ; exit 1; } elif [[ "$_MODE" == "native" ]]; then python setup.py bdist_wheel || { echo 'failed to build native wheel' ; exit 1; } elif [[ "$_MODE" == "bdist" ]]; then echo "Assume wheel has already been built" else echo "ERROR: bad mode" exit 1 fi done echo " === === " else echo "DO_BUILD=False, Skipping build" fi ls_array(){ __doc__=' Read the results of a glob pattern into an array Args: arr_name glob_pattern Example: arr_name="myarray" glob_pattern="*" pass ' local arr_name="$1" local glob_pattern="$2" shopt -s nullglob # shellcheck disable=SC2206 array=($glob_pattern) shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later # FIXME; for some reason this does not always work properly # Copy the array into the dynamically named variable # shellcheck disable=SC2086 readarray -t $arr_name < <(printf '%s\n' "${array[@]}") } WHEEL_FPATHS=() for _MODE in "${MODE_LIST[@]}" do if [[ "$_MODE" == "sdist" ]]; then ls_array "_NEW_WHEEL_PATHS" "dist/${NAME}-${VERSION}*.tar.gz" elif [[ "$_MODE" == "native" ]]; then ls_array "_NEW_WHEEL_PATHS" "dist/${NAME}-${VERSION}*.whl" elif [[ "$_MODE" == "bdist" ]]; then ls_array "_NEW_WHEEL_PATHS" "wheelhouse/${NAME}-${VERSION}-*.whl" else echo "ERROR: bad mode" exit 1 fi # hacky CONCAT because for some reason ls_array will return # something that looks empty but has one empty element for new_item in "${_NEW_WHEEL_PATHS[@]}" do if [[ "$new_item" != "" ]]; then WHEEL_FPATHS+=("$new_item") fi done done # Dedup the paths readarray -t WHEEL_FPATHS < <(printf '%s\n' "${WHEEL_FPATHS[@]}" | sort -u) WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_FPATHS[@]}") echo "WHEEL_PATHS_STR = $WHEEL_PATHS_STR" echo " MODE=$MODE VERSION='$VERSION' WHEEL_FPATHS='$WHEEL_PATHS_STR' " WHEEL_SIGNATURE_FPATHS=() if [ "$DO_GPG" == "True" ]; then echo " === === " for WHEEL_FPATH in "${WHEEL_FPATHS[@]}" do echo "WHEEL_FPATH = $WHEEL_FPATH" check_variable WHEEL_FPATH # https://stackoverflow.com/questions/45188811/how-to-gpg-sign-a-file-that-is-built-by-travis-ci # secure gpg --export-secret-keys > all.gpg # REQUIRES GPG >= 2.2 check_variable GPG_EXECUTABLE || { echo 'failed no gpg exe' ; exit 1; } check_variable GPG_KEYID || { echo 'failed no gpg key' ; exit 1; } echo "Signing wheels" GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" echo "GPG_SIGN_CMD = $GPG_SIGN_CMD" $GPG_SIGN_CMD --output "$WHEEL_FPATH".asc "$WHEEL_FPATH" echo "Checking wheels" twine check "$WHEEL_FPATH".asc "$WHEEL_FPATH" || { echo 'could not check wheels' ; exit 1; } echo "Verifying wheels" $GPG_EXECUTABLE --verify "$WHEEL_FPATH".asc "$WHEEL_FPATH" || { echo 'could not verify wheels' ; exit 1; } WHEEL_SIGNATURE_FPATHS+=("$WHEEL_FPATH".asc) done echo " === === " else echo "DO_GPG=False, Skipping GPG sign" fi if [ "$DO_OTS" == "True" ]; then echo " === === " if [ "$DO_GPG" == "True" ]; then # Stamp the wheels and the signatures ots stamp "${WHEEL_FPATHS[@]}" "${WHEEL_SIGNATURE_FPATHS[@]}" else # Stamp only the wheels ots stamp "${WHEEL_FPATHS[@]}" fi echo " === === " else echo "DO_OTS=False, Skipping OTS sign" fi if [[ "$DO_TAG" == "True" ]]; then TAG_NAME="v${VERSION}" # if we messed up we can delete the tag # git push origin :refs/tags/$TAG_NAME # and then tag with -f # git tag "$TAG_NAME" -m "tarball tag $VERSION" git push --tags "$DEPLOY_REMOTE" echo "Should also do a: git push $DEPLOY_REMOTE main:release" echo "For github should draft a new release: https://github.com/PyUtils/line_profiler/releases/new" else echo "Not tagging" fi if [[ "$DO_UPLOAD" == "True" ]]; then check_variable TWINE_USERNAME check_variable TWINE_PASSWORD "hide" for WHEEL_FPATH in "${WHEEL_FPATHS[@]}" do twine upload --username "$TWINE_USERNAME" "--password=$TWINE_PASSWORD" \ --repository-url "$TWINE_REPOSITORY_URL" \ "$WHEEL_FPATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } done echo """ !!! FINISH: LIVE RUN !!! """ else echo """ DRY RUN ... Skipping upload DEPLOY_REMOTE = '$DEPLOY_REMOTE' DO_UPLOAD = '$DO_UPLOAD' WHEEL_FPATH = '$WHEEL_FPATH' WHEEL_PATHS_STR = '$WHEEL_PATHS_STR' MODE_LIST_STR = '$MODE_LIST_STR' VERSION='$VERSION' NAME='$NAME' TWINE_USERNAME='$TWINE_USERNAME' GPG_KEYID = '$GPG_KEYID' To do live run set DO_UPLOAD=1 and ensure deploy and current branch are the same !!! FINISH: DRY RUN !!! """ fi __devel__=' # Checking to see how easy it is to upload packages to gitlab. # This logic should go in the CI script, not sure if it belongs here. export HOST=https://gitlab.kitware.com export GROUP_NAME=computer-vision export PROJECT_NAME=geowatch PROJECT_VERSION=$(geowatch --version) echo "$PROJECT_VERSION" load_secrets export PRIVATE_GITLAB_TOKEN=$(git_token_for "$HOST") TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". | map(select(.name==\"$GROUP_NAME\")) | .[0].id") echo "GROUP_ID = $GROUP_ID" curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" PROJ_ID=$(cat "$TMP_DIR/group_info" | jq ".projects | map(select(.name==\"$PROJECT_NAME\")) | .[0].id") echo "PROJ_ID = $PROJ_ID" ls_array DIST_FPATHS "dist/*" for FPATH in "${DIST_FPATHS[@]}" do FNAME=$(basename $FPATH) echo $FNAME curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" \ --upload-file $FPATH \ "https://gitlab.kitware.com/api/v4/projects/$PROJ_ID/packages/generic/$PROJECT_NAME/$PROJECT_VERSION/$FNAME" done ' Erotemic-xdoctest-fac8308/pyproject.toml000066400000000000000000000055431505122333300204520ustar00rootroot00000000000000[build-system] requires = [ "setuptools>=41.0.1",] build-backend = "setuptools.build_meta" [tool.mypy] ignore_missing_imports = true [tool.xcookie] tags = [ "erotemic", "github", "purepy",] mod_name = "xdoctest" repo_name = "xdoctest" rel_mod_parent_dpath = "./src" os = [ "all", "win", "linux", "osx",] min_python = '3.8' author = "Jon Crall" author_email = "erotemic@gmail.com" description = "A rewrite of the builtin doctest module" url = "https://github.com/Erotemic/xdoctest" license = "Apache 2" dev_status = "stable" typed = true skip_autogen = ["MANIFEST.in"] classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Software Development :: Testing", "Topic :: Utilities", "Framework :: Pytest", # This should be interpreted as Apache License v2.0 "License :: OSI Approved :: Apache Software License", # Supported Python versions "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: CPython", ] [tool.xcookie.setuptools] keywords = 'xdoctest,doctest,test,docstr,pytest' [tool.xcookie.entry_points] # the console_scripts entry point creates the xdoctest executable console_scripts = [ "xdoctest = xdoctest.__main__:main", ] # the pytest11 entry point makes the plugin available to pytest pytest11 = [ "xdoctest = xdoctest.plugin", ] [tool.pytest.ini_options] addopts = "-p no:doctest --xdoctest --xdoctest-style=google --ignore-glob=setup.py --ignore-glob=dev --ignore-glob=docs" norecursedirs = ".git ignore build __pycache__ dev _skbuild docs" filterwarnings = [ "default", "ignore:.*No cfgstr given in Cacher constructor or call.*:Warning", "ignore:.*Define the __nice__ method for.*:Warning", "ignore:.*private pytest class or function.*:Warning",] [tool.coverage.run] branch = true source = ["src/xdoctest"] [tool.coverage.report] exclude_lines = [ "pragma: no cover", ".* # pragma: no cover", ".* # nocover", "def __repr__", "raise AssertionError", "raise NotImplementedError", "if 0:", "if trace is not None", "verbose = .*", "^ *raise", "^ *pass *$", "if _debug:", "if __name__ == .__main__.:", ] omit = [ "*/setup.py", ] [tool.xdoctest] options = '' [tool.codespell] skip = ['./docs/build', './*.egg-info', './build', './htmlcov'] count = true quiet-level = 3 ignore-words-list = ['wont', 'cant', 'ANS', 'doesnt', 'arent', 'ans', 'thats', 'datas', 'isnt', 'didnt', 'wasnt'] Erotemic-xdoctest-fac8308/pytest.ini000066400000000000000000000010761505122333300175640ustar00rootroot00000000000000[pytest] # ON COVERAGE OF PYTEST PLUGINS: # http://pytest-cov.readthedocs.io/en/latest/plugins.html addopts = -p pytester -p no:doctest --xdoctest --ignore-glob=setup.py --ignore=.tox --ignore=setup.py --ignore=dev norecursedirs = .git ignore build __pycache__ docs *.egg-info dev tests/pybind11_test setup.py # --pyargs --doctest-modules --ignore=.tox ;rsyncdirs = tox.ini pytest.py _pytest testing ;python_files = test_*.py *_test.py tests/*/*.py ;python_classes = Test Acceptance ;python_functions = test ;norecursedirs = .tox ja .hg cx_freeze_source ;xfail_strict=true Erotemic-xdoctest-fac8308/requirements.txt000066400000000000000000000001231505122333300210070ustar00rootroot00000000000000-r requirements/runtime.txt -r requirements/tests.txt -r requirements/optional.txt Erotemic-xdoctest-fac8308/requirements/000077500000000000000000000000001505122333300202525ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/requirements/colors.txt000066400000000000000000000001001505122333300223030ustar00rootroot00000000000000Pygments >= 2.15.1 colorama >= 0.4.1;platform_system=="Windows" Erotemic-xdoctest-fac8308/requirements/docs.txt000066400000000000000000000003031505122333300217370ustar00rootroot00000000000000sphinx >= 5.0.1 sphinx-autobuild >= 2021.3.14 sphinx_rtd_theme >= 1.0.0 sphinxcontrib-napoleon >= 0.7 sphinx-autoapi >= 1.8.4 Pygments >= 2.15.1 myst_parser >= 0.18.0 sphinx-reredirects >= 0.0.1 Erotemic-xdoctest-fac8308/requirements/jupyter.txt000066400000000000000000000022071505122333300225160ustar00rootroot00000000000000nbconvert>=6.1.0; python_version >= '3.7.0' and platform_python_implementation != "PyPy" # Python 3.7+ # For nbconvert jinja2>=3.0.0 ; platform_python_implementation != "PyPy" # Python 3.6+ jupyter_client>=7.0.0 IPython>=7.23.1 # Requires psutil? Breaking on pypy windows? ipykernel>=6.11.0 ; python_version < '4.0' and python_version >= '3.12' and (platform_system!="Windows" or platform_python_implementation != "PyPy") # Python 3.12+ ipykernel>=6.0.0 ; python_version < '3.12' and python_version >= '3.7' and (platform_system!="Windows" or platform_python_implementation != "PyPy") # Python 3.7-3.11 # For IPython-kernel debugpy>=1.6.0 ; python_version >= '3.10' # Python 3.10+ debugpy>=1.3.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 debugpy>=1.0.0 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 # Needed for 3.10 tests # Needed for earlier versions of nbconvert ipython_genutils >= 0.2.0 ; platform_python_implementation != "PyPy" # Python 3.10+ # For IPython jedi>=0.16 attrs>=19.2.0 # For jupyter-client jupyter_core >= 4.7.0 Erotemic-xdoctest-fac8308/requirements/optional.txt000066400000000000000000000002361505122333300226410ustar00rootroot00000000000000-r colors.txt -r jupyter.txt pyflakes >= 2.2.0 # xdev availpkg tomli tomli>=0.2.0 ; python_version < '3.11.0' and python_version >= '3.6' # Python 3.6+ Erotemic-xdoctest-fac8308/requirements/runtime.txt000066400000000000000000000000001505122333300224640ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/requirements/tests-binary.txt000066400000000000000000000014231505122333300234370ustar00rootroot00000000000000# For testing doctests in binary extension modules scikit-build>=0.16.1 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ scikit-build>=0.11.1 ; python_version < '3.11' # Python <=3.10 ninja>=1.11.1 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ ninja>=1.10.2 ; python_version < '3.11' # Python <=3.10 pybind11>=2.10.3 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ pybind11>=2.7.1 ; python_version < '3.11' # Python <=3.10 cmake>=3.25.0 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ cmake>=3.21.2 ; python_version < '3.11' # Python <=3.10 Erotemic-xdoctest-fac8308/requirements/tests.txt000066400000000000000000000007331505122333300221600ustar00rootroot00000000000000# Pin maximum pytest versions for older python versions # TODO: determine what the actual minimum and maximum acceptable versions of # pytest (that are also compatible with xdoctest) are for each legacy python # major.minor version. # See xdev availpkg for helper script pytest >= 6.2.5 ; python_version >= '3.10.0' # Python 3.10+ pytest >= 4.6.0 ; python_version < '3.10.0' and python_version >= '3.7.0' # Python 3.7-3.9 pytest-cov >= 3.0.0 Erotemic-xdoctest-fac8308/run_developer_setup.sh000077500000000000000000000001721505122333300221570ustar00rootroot00000000000000#!/bin/bash # Install dependency packages pip install -r requirements.txt # Install in development mode pip install -e . Erotemic-xdoctest-fac8308/run_doctests.sh000077500000000000000000000001011505122333300205720ustar00rootroot00000000000000#!/usr/bin/env bash xdoctest src/xdoctest --style=google all "$@"Erotemic-xdoctest-fac8308/run_linter.sh000077500000000000000000000002421505122333300202450ustar00rootroot00000000000000#!/bin/bash flake8 --count --select=E9,F63,F7,F82 --show-source --statistics src/xdoctest flake8 --count --select=E9,F63,F7,F82 --show-source --statistics ./testsErotemic-xdoctest-fac8308/run_tests.py000077500000000000000000000007251505122333300201360ustar00rootroot00000000000000#!/usr/bin/env python if __name__ == '__main__': import pytest import sys package_name = 'xdoctest' mod_dpath = 'src/xdoctest' test_dpath = 'tests' pytest_args = [ '--cov-config', 'pyproject.toml', '--cov-report', 'html', '--cov-report', 'term', '--xdoctest', '--cov=' + package_name, mod_dpath, test_dpath ] pytest_args = pytest_args + sys.argv[1:] sys.exit(pytest.main(pytest_args)) Erotemic-xdoctest-fac8308/setup.py000077500000000000000000000250671505122333300172560ustar00rootroot00000000000000#!/usr/bin/env python # Generated by ~/code/xcookie/xcookie/builders/setup.py # based on part ~/code/xcookie/xcookie/rc/setup.py.in import sys import re from os.path import exists, dirname, join from setuptools import find_packages from setuptools import setup def parse_version(fpath): """ Statically parse the version number from a python file """ value = static_parse("__version__", fpath) return value def static_parse(varname, fpath): """ Statically parse the a constant variable from a python file. Raise an error if the variable is not a constant. """ import ast if not exists(fpath): raise ValueError("fpath={!r} does not exist".format(fpath)) with open(fpath, "r") as file_: sourcecode = file_.read() pt = ast.parse(sourcecode) class StaticVisitor(ast.NodeVisitor): def visit_Assign(self, node: ast.Assign): for target in node.targets: if getattr(target, "id", None) == varname: value: ast.expr = node.value if not isinstance(value, ast.Constant): raise ValueError("variable {!r} is not a constant".format(varname)) self.static_value = value.value visitor = StaticVisitor() visitor.visit(pt) try: value = visitor.static_value except AttributeError: import warnings value = "Unknown {}".format(varname) warnings.warn(value) return value def parse_description(): """ Parse the description in the README file CommandLine: pandoc --from=markdown --to=rst --output=README.rst README.md python -c "import setup; print(setup.parse_description())" """ readme_fpath = join(dirname(__file__), "README.rst") # This breaks on pip install, so check that it exists. if exists(readme_fpath): with open(readme_fpath, "r") as f: text = f.read() return text return "" def parse_requirements(fname="requirements.txt", versions=False): """ Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file versions (bool | str, default=False): If true include version specs. If strict, then pin to the minimum version. Returns: List[str]: list of requirements items CommandLine: python -c "import setup, ubelt; print(ubelt.urepr(setup.parse_requirements()))" """ require_fpath = fname def parse_line(line, dpath=""): """ Parse information from a line in a requirements text file line = 'git+https://a.com/somedep@sometag#egg=SomeDep' line = '-e git+https://a.com/somedep@sometag#egg=SomeDep' """ # Remove inline comments comment_pos = line.find(" #") if comment_pos > -1: line = line[:comment_pos] if line.startswith("-r "): # Allow specifying requirements in other files target = join(dpath, line.split(" ")[1]) for info in parse_require_file(target): yield info else: # See: https://www.python.org/dev/peps/pep-0508/ info = {"line": line} if line.startswith("-e "): info["package"] = line.split("#egg=")[1] else: if "--find-links" in line: # setuptools doesnt seem to handle find links line = line.split("--find-links")[0] if ";" in line: pkgpart, platpart = line.split(";") # Handle platform specific dependencies # setuptools.readthedocs.io/en/latest/setuptools.html # #declaring-platform-specific-dependencies plat_deps = platpart.strip() info["platform_deps"] = plat_deps else: pkgpart = line platpart = None # Remove versioning from the package pat = "(" + "|".join([">=", "==", ">"]) + ")" parts = re.split(pat, pkgpart, maxsplit=1) parts = [p.strip() for p in parts] info["package"] = parts[0] if len(parts) > 1: op, rest = parts[1:] version = rest # NOQA info["version"] = (op, version) yield info def parse_require_file(fpath): dpath = dirname(fpath) with open(fpath, "r") as f: for line in f.readlines(): line = line.strip() if line and not line.startswith("#"): for info in parse_line(line, dpath=dpath): yield info def gen_packages_items(): if exists(require_fpath): for info in parse_require_file(require_fpath): parts = [info["package"]] if versions and "version" in info: if versions == "strict": # In strict mode, we pin to the minimum version if info["version"]: # Only replace the first >= instance verstr = "".join(info["version"]).replace(">=", "==", 1) parts.append(verstr) else: parts.extend(info["version"]) if not sys.version.startswith("3.4"): # apparently package_deps are broken in 3.4 plat_deps = info.get("platform_deps") if plat_deps is not None: parts.append(";" + plat_deps) item = "".join(parts) yield item packages = list(gen_packages_items()) return packages # # Maybe use in the future? But has private deps # def parse_requirements_alt(fpath='requirements.txt', versions='loose'): # """ # Args: # versions (str): can be # False or "free" - remove all constraints # True or "loose" - use the greater or equal (>=) in the req file # strict - replace all greater equal with equals # """ # # Note: different versions of pip might have different internals. # # This may need to be fixed. # from pip._internal.req import parse_requirements # from pip._internal.network.session import PipSession # requirements = [] # for req in parse_requirements(fpath, session=PipSession()): # if not versions or versions == 'free': # req_name = req.requirement.split(' ')[0] # requirements.append(req_name) # elif versions == 'loose' or versions is True: # requirements.append(req.requirement) # elif versions == 'strict': # part1, *rest = req.requirement.split(';') # strict_req = ';'.join([part1.replace('>=', '==')] + rest) # requirements.append(strict_req) # else: # raise KeyError(versions) # requirements = [r.replace(' ', '') for r in requirements] # return requirements NAME = "xdoctest" INIT_PATH = "src/xdoctest/__init__.py" VERSION = parse_version(INIT_PATH) if __name__ == "__main__": setupkw = {} setupkw["install_requires"] = parse_requirements( "requirements/runtime.txt", versions="loose" ) setupkw["extras_require"] = { "all": parse_requirements("requirements.txt", versions="loose"), "runtime": parse_requirements("requirements/runtime.txt", versions="loose"), "tests": parse_requirements("requirements/tests.txt", versions="loose"), "optional": parse_requirements("requirements/optional.txt", versions="loose"), "colors": parse_requirements("requirements/colors.txt", versions="loose"), "docs": parse_requirements("requirements/docs.txt", versions="loose"), "jupyter": parse_requirements("requirements/jupyter.txt", versions="loose"), "tests-binary": parse_requirements( "requirements/tests-binary.txt", versions="loose" ), "all-strict": parse_requirements("requirements.txt", versions="strict"), "runtime-strict": parse_requirements( "requirements/runtime.txt", versions="strict" ), "tests-strict": parse_requirements("requirements/tests.txt", versions="strict"), "optional-strict": parse_requirements( "requirements/optional.txt", versions="strict" ), "colors-strict": parse_requirements( "requirements/colors.txt", versions="strict" ), "docs-strict": parse_requirements("requirements/docs.txt", versions="strict"), "jupyter-strict": parse_requirements( "requirements/jupyter.txt", versions="strict" ), "tests-binary-strict": parse_requirements( "requirements/tests-binary.txt", versions="strict" ), } setupkw["name"] = NAME setupkw["version"] = VERSION setupkw["author"] = "Jon Crall" setupkw["author_email"] = "erotemic@gmail.com" setupkw["url"] = "https://github.com/Erotemic/xdoctest" setupkw["description"] = "A rewrite of the builtin doctest module" setupkw["long_description"] = parse_description() setupkw["long_description_content_type"] = "text/x-rst" setupkw["license"] = "Apache 2" setupkw["packages"] = find_packages("./src") setupkw["python_requires"] = ">=3.8" setupkw["classifiers"] = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Utilities", "License :: OSI Approved :: Apache Software License", "Topic :: Software Development :: Testing", "Framework :: Pytest", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: CPython", ] setupkw["package_data"] = {"xdoctest": ["py.typed", "*.pyi"]} setupkw["package_dir"] = { "": "./src", } setupkw["entry_points"] = { "console_scripts": [ "xdoctest = xdoctest.__main__:main", ], "pytest11": [ "xdoctest = xdoctest.plugin", ], } setupkw["keywords"] = "xdoctest,doctest,test,docstr,pytest" setup(**setupkw) Erotemic-xdoctest-fac8308/src/000077500000000000000000000000001505122333300163165ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/src/xdoctest/000077500000000000000000000000001505122333300201535ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/src/xdoctest/__init__.py000066400000000000000000000275041505122333300222740ustar00rootroot00000000000000# :github_url: https://github.com/Erotemic/xdoctest ''' .. The large version wont work because github strips rst image rescaling. https://i.imgur.com/u0tYYxM.png .. image:: https://i.imgur.com/u0tYYxM.png :height: 100px :align: left .. note that the following few characters are invisible unicode characters so .. we can hack the position of the title ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ Xdoctest - Execute Doctests =========================== Xdoctest is a Python package for executing tests in documentation strings! What is a `doctest `__? It is example code you write in a docstring! What is a `docstring `__? Its a string you use as a comment! They get attached to Python functions and classes as metadata. They are often used to auto-generate documentation. Why is it cool? Because you can write tests while you code! Xdoctest finds and executes your doctests for you. Just run ``xdoctest ``. It plugs into pytest to make it easy to run on a CI. Install and run ``pytest --xdoctest``. The :py:mod:`xdoctest` package is a re-write of Python's builtin :py:mod:`doctest` module. It replaces the old regex-based parser with a new abstract-syntax-tree based parser (using Python's :py:mod:`ast` module). The goal is to make doctests easier to write, simpler to configure, and encourage the pattern of test driven development. +---------------+-------------------------------------------+ | Read the docs | http://xdoctest.readthedocs.io/en/latest | +---------------+-------------------------------------------+ | Github | https://github.com/Erotemic/xdoctest | +---------------+-------------------------------------------+ | Pypi | https://pypi.org/project/xdoctest | +---------------+-------------------------------------------+ | PyCon 2020 | `Youtube Video`_ and `Google Slides`_ | +---------------+-------------------------------------------+ .. _Youtube Video: https://www.youtube.com/watch?v=CUjCqOw_oFk .. _Google Slides: https://docs.google.com/presentation/d/1563XL-n7534QmktrkLSjVqX36z5uhjUFrPw8wIO6z1c Getting Started 0: Installation -------------------------------- First ensure that you have :doc:`Python installed <../manual/installing_python>` and ideally are in a virtual environment. Install xdoctest using the pip. .. code:: bash pip install xdoctest Alternatively you can install xdoctest with optional packages. .. code:: bash pip install xdoctest[all] This ensures that the :py:mod:`pygments` and :py:mod:`colorama` packages are installed, which are required to color terminal output. Getting Started 1: Your first doctest ------------------------------------- If you already know how to write a doctest then you can skip to the next section. If you aren't familiar with doctests, this will help get you up to speed. Consider the following implementation the Fibonacci function. .. code:: python def fib(n): """ Python 3: Fibonacci series up to n """ a, b = 0, 1 while a < n: print(a, end=' ') a, b = b, a+b print() We can add a "doctest" in the "docstring" as both an example and a test of the code. All we have to do is prefix the doctest code with three right chevrons `` >>> ``. We can also use xdoctest directives to control the flow of doctest execution. .. code:: python def fib(n): """ Python 3: Fibonacci series up to n Example: >>> fib(1000) # xdoctest: +SKIP 0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 """ a, b = 0, 1 while a < n: print(a, end=' ') a, b = b, a+b print() Now if this text was in a file called ``fib.py`` you could execute your doctest by running ``xdoctest fib.py``. Note that if ``fib.py`` was in a package called ``mymod``, you could equivalently run ``xdoctest -m mymod.fib``. In other words you can all doctests in a file by passing xdoctest the module name or the module path. Interestingly because this documentation is written in the ``xdoctest/__init__.py`` file, which is a Python file, that means we can write doctests in it. If you have xdoctest installed, you can use the xdoctest cli to execute the following code: ``xdoctest -m xdoctest.__init__ __doc__:0``. Also notice that the previous doctest is skipped due to the SKIP directive. For more information on directives see :doc:`the docs for the xdoctest directive module`. .. code:: python >>> # Python 3: Fibonacci series up to n >>> def fib(n): >>> a, b = 0, 1 >>> while a < n: >>> print(a, end=' ') >>> a, b = b, a+b >>> print() >>> fib(25) 0 1 1 2 3 5 8 13 21 Getting Started 2: Running your doctests ---------------------------------------- There are two ways to run xdoctest: (1) :py:mod:`pytest` or (2) the native :py:mod:`xdoctest` interface. The native interface is less opaque and implicit, but its purpose is to run doctests. The other option is to use the widely used pytest package. This allows you to run both unit tests and doctests with the same command and has many other advantages. It is recommended to use pytest for automatic testing (e.g. in your CI scripts), but for debugging it may be easier to use the native interface. Using the pytest interface ^^^^^^^^^^^^^^^^^^^^^^^^^^ When pytest is run, xdoctest is automatically discovered, but is disabled by default. This is because xdoctest needs to replace the builtin :py:mod:`pytest._pytest.doctest` plugin. To enable this plugin, run ``pytest`` with ``--xdoctest`` or ``--xdoc``. This can either be specified on the command line or added to your ``addopts`` options in the ``[pytest]`` section of your ``pytest.ini`` or ``tox.ini``. To run a specific doctest, xdoctest sets up pytest node names for these doctests using the following pattern: ``:::``. For example a doctest for a function might look like this ``mymod.py::funcname:0``, and a class method might look like this: ``mymod.py::ClassName::method:0`` Using the native interface. ^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :py:mod:`xdoctest` module contains a :py:mod:`pytest` plugin, but also contains a native command line interface (CLI). The CLI is generated using :py:mod:`argparse`. For help you can run .. code-block:: bash xdoctest --help which produces something similar to the following output: .. code-block:: text usage: xdoctest [-h] [--version] [-m MODNAME] [-c COMMAND] [--style {auto,google,freeform}] [--analysis {auto,static,dynamic}] [--durations DURATIONS] [--time] [--colored COLORED] [--nocolor] [--offset] [--report {none,cdiff,ndiff,udiff,only_first_failure}] [--options OPTIONS] [--global-exec GLOBAL_EXEC] [--verbose VERBOSE] [--quiet] [--silent] [arg ...] Xdoctest 1.0.0 - on Python - 3.9.9 (main, Jan 6 2022, 18:33:12) [GCC 10.3.0] - discover and run doctests within a python package positional arguments: arg Ignored if optional arguments are specified, otherwise: Defaults --modname to arg.pop(0). Defaults --command to arg.pop(0). (default: None) optional arguments: -h, --help show this help message and exit --version Display version info and quit (default: False) -m MODNAME, --modname MODNAME Module name or path. If specified positional modules are ignored (default: None) -c COMMAND, --command COMMAND A doctest name or a command (list|all|). Defaults to all (default: None) --style {auto,google,freeform} Choose the style of doctests that will be parsed (default: auto) --analysis {auto,static,dynamic} How doctests are collected (default: auto) --durations DURATIONS Specify execution times for slowest N tests.N=0 will show times for all tests (default: None) --time Same as if durations=0 (default: False) --colored COLORED Enable or disable ANSI coloration in stdout (default: True) --nocolor Disable ANSI coloration in stdout --offset If True formatted source linenumbers will agree with their location in the source file. Otherwise they will be relative to the doctest itself. (default: False) --report {none,cdiff,ndiff,udiff,only_first_failure} Choose another output format for diffs on xdoctest failure (default: udiff) --options OPTIONS Default directive flags for doctests (default: None) --global-exec GLOBAL_EXEC Custom Python code to execute before every test (default: None) --verbose VERBOSE Verbosity level. 0 is silent, 1 prints out test names, 2 additionally prints test stdout, 3 additionally prints test source (default: 3) --quiet sets verbosity to 1 --silent sets verbosity to 0 The xdoctest interface can be run programmatically using ``xdoctest.doctest_module(path)``, which can be placed in the ``__main__`` section of any module as such: .. code-block:: python if __name__ == '__main__': import xdoctest xdoctest.doctest_module(__file__) This sets up the ability to invoke the ``xdoctest`` command line interface by invoking your module as a `main script `_: ``python -m ``, where ```` is the name of your module (e.g. `foo.bar`) and command follows the following rules: - If ```` is ``all``, then each enabled doctest in the module is executed: ``python -m all`` - If ```` is ``list``, then the names of each enabled doctest is listed. - If ```` is ``dump``, then all doctests are converted into a format suitable for unit testing, and dumped to stdout (new in 0.4.0). - If ```` is a "callname" (name of a function or a class and method), then that specific doctest is executed: ``python -m ``. Note: you can execute disabled doctests or functions without any arguments (zero-args) this way. XDoctest is a good demonstration of itself. After pip installing xdoctest, try running xdoctest on xdoctest. .. code:: bash xdoctest xdoctest If you would like a slightly less verbose output, try .. code:: bash xdoctest xdoctest --verbose=1 # or xdoctest xdoctest --verbose=0 You could also consider running xdoctests tests through pytest: .. code:: bash pytest $(python -c 'import xdoctest, pathlib; print(pathlib.Path(xdoctest.__file__).parent)') --xdoctest If you would like a slightly more verbose output, try .. code:: bash pytest -s --verbose --xdoctest-verbose=3 --xdoctest $(python -c 'import xdoctest, pathlib; print(pathlib.Path(xdoctest.__file__).parent)') If you ran these commands, the myriad of characters that flew across your screen are lots more examples of what you can do with doctests. You can also run doctests :doc:`inside Jupyter Notebooks <../manual/xdoc_with_jupyter>`. ''' __autogen__ = ''' mkinit xdoctest --nomods ''' __version__ = '1.3.0' # Expose only select submodules __submodules__ = [ 'runner', 'exceptions', ] from xdoctest import utils from xdoctest import docstr from xdoctest.runner import (doctest_module, doctest_callable,) from xdoctest.exceptions import (DoctestParseError, ExitTestException, MalformedDocstr, ExistingEventLoopError) __all__ = ['DoctestParseError', 'ExitTestException', 'MalformedDocstr', 'ExistingEventLoopError', 'doctest_module', 'doctest_callable', 'utils', 'docstr', '__version__'] Erotemic-xdoctest-fac8308/src/xdoctest/__main__.py000066400000000000000000000127651505122333300222600ustar00rootroot00000000000000#!/usr/bin/env python """ Provides a simple script for running module doctests. This should work even if the target module is unaware of xdoctest. """ import sys __tests__ = """ Ignore: xdoctest -m xdoctest.demo xdoctest ~/code/xdoctest/src/xdoctest/demo.py python -m xdoctest xdoctest all python -m xdoctest networkx all --options=+IGNORE_WHITESPACE """ def main(argv=None): """ Args: argv (List[str] | None): """ import xdoctest if argv is None: argv = sys.argv version_info = { 'sys_version': sys.version, 'version': xdoctest.__version__, } if '--version' in argv: print(version_info['version']) return 0 if '--version-info' in argv: print('sys_version = {}'.format(version_info['sys_version'])) print('file = {}'.format(__file__)) print('version = {}'.format(version_info['version'])) return 0 import argparse import textwrap from xdoctest import utils from os.path import exists # FIXME: default values are reporting incorrectly or are missformated class RawDescriptionDefaultsHelpFormatter( argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter): pass parser = argparse.ArgumentParser( prog='xdoctest', description=( 'Xdoctest {version} - on Python - {sys_version} - ' 'discover and run doctests within a python package' ).format(**version_info), formatter_class=RawDescriptionDefaultsHelpFormatter, ) # Ignored if optional arguments are specified, otherwise: # Defaults --modname to arg.pop(0). # Defaults --command to arg.pop(0). parser.add_argument( 'arg', nargs='*', help=utils.codeblock( ''' If the `--command` key / value pair is unspecified, the first positional argument is used as the command. ''')) parser.add_argument('--version', action='store_true', help='Display version and quit') parser.add_argument('--version-info', action='store_true', help='Display version and other info and quit') # The bulk of the argparse CLI is defined in the doctest example from xdoctest import doctest_example from xdoctest import runner runner._update_argparse_cli(parser.add_argument) doctest_example.DoctestConfig()._update_argparse_cli(parser.add_argument) args, unknown = parser.parse_known_args(args=argv[1:]) ns = args.__dict__.copy() if ns['version']: print(xdoctest.__version__) return 0 # ... postprocess args modname = ns['modname'] command = ns['command'] arg = ns['arg'] style = ns['style'] durations = ns['durations'] analysis = ns['analysis'] if ns['time']: durations = 0 # --- # Allow for positional args to specify defaults for unspecified optionals errors = [] if modname is None: if len(arg) == 0: errors += ['you must specify modname or modpath'] else: modname = arg.pop(0) if command is None: if len(arg) == 0: # errors += ['you must specify a command e.g (list, all)'] command = 'all' else: command = arg.pop(0) if errors: if len(errors) == 1: errmsg = errors[0] else: listed_errors = ', '.join(['({}) {}'.format(c, e) for c, e in enumerate(errors, start=1)]) errmsg = '{} errors: {}'.format(len(errors), listed_errors) parser.error(errmsg) # --- options = ns['options'] if options is None: options = '' pyproject_fpath = 'pyproject.toml' if exists(pyproject_fpath): try: import tomllib except ImportError: try: import tomli as tomllib except ImportError: pass else: with open(pyproject_fpath, 'rb') as file: pyproject_settings = tomllib.load(file) try: options = pyproject_settings['tool']['xdoctest']['options'] except KeyError: pass if exists('pytest.ini'): import configparser parser = configparser.ConfigParser() parser.read('pytest.ini') try: options = parser.get('pytest', 'xdoctest_options') except configparser.NoOptionError: pass ns['options'] = options from xdoctest import doctest_example config = doctest_example.DoctestConfig()._populate_from_cli(ns) if config['verbose'] > 2: print(textwrap.dedent( r''' ===================================== _ _ ___ ____ ____ ___ ____ ____ ___ \/ | \ | | | | |___ [__ | _/\_ |__/ |__| |___ | |___ ___] | ===================================== ''')) run_summary = xdoctest.doctest_module(modname, argv=[], style=style, command=command, verbose=config['verbose'], config=config, durations=durations, analysis=analysis) n_failed = run_summary.get('n_failed', 0) if n_failed > 0: return 1 else: return 0 if __name__ == '__main__': retcode = main() sys.exit(retcode) Erotemic-xdoctest-fac8308/src/xdoctest/__main__.pyi000066400000000000000000000001331505122333300224130ustar00rootroot00000000000000from typing import List __tests__: str def main(argv: List[str] | None = None): ... Erotemic-xdoctest-fac8308/src/xdoctest/_tokenize.py000066400000000000000000000634141505122333300225240ustar00rootroot00000000000000# type: ignore # Vendored from Python 3.11 """Tokenization help for Python programs. tokenize(readline) is a generator that breaks a stream of bytes into Python tokens. It decodes the bytes according to PEP-0263 for determining source file encoding. It accepts a readline-like method which is called repeatedly to get the next line of input (or b"" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators. Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. """ __author__ = 'Ka-Ping Yee ' __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' 'Michael Foord') from builtins import open as _builtin_open from codecs import lookup, BOM_UTF8 import collections import functools from io import TextIOWrapper import itertools as _itertools import re import sys from token import * from token import EXACT_TOKEN_TYPES cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII) blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) import token __all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding", "untokenize", "TokenInfo"] del token class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) @property def exact_type(self): if self.type == OP and self.string in EXACT_TOKEN_TYPES: return EXACT_TOKEN_TYPES[self.string] else: return self.type def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'\w+' Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' Binnumber = r'0[bB](?:_?[01])+' Octnumber = r'0[oO](?:_?[0-7])+' Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) Expfloat = r'[0-9](?:_?[0-9])*' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Return the empty string, plus all of the valid string prefixes. def _all_string_prefixes(): # The valid string prefixes. Only contain the lower case versions, # and don't contain any permutations (include 'fr', but not # 'rf'). The various permutations will be generated. _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr'] # if we add binary f-strings, add: ['fb', 'fbr'] result = {''} for prefix in _valid_string_prefixes: for t in _itertools.permutations(prefix): # create a list with upper and lower versions of each # character for u in _itertools.product(*[(c, c.upper()) for c in t]): result.add(''.join(u)) return result @functools.lru_cache def _compile(expr): return re.compile(expr, re.UNICODE) # Note that since _all_string_prefixes includes the empty string, # StringPrefix can be the empty string (making it optional). StringPrefix = group(*_all_string_prefixes()) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group(StringPrefix + "'''", StringPrefix + '"""') # Single-line ' or " string. String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Sorting in reverse order puts the long operators before their prefixes. # Otherwise if = came before ==, == would get recognized as two instances # of =. Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True))) Funny = group(r'\r?\n', Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) # For a given string prefix plus quotes, endpats maps it to a regex # to match the remainder of that string. _prefix can be empty, for # a normal single or triple quoted string (with no prefix). endpats = {} for _prefix in _all_string_prefixes(): endpats[_prefix + "'"] = Single endpats[_prefix + '"'] = Double endpats[_prefix + "'''"] = Single3 endpats[_prefix + '"""'] = Double3 del _prefix # A set of all of the single and triple quoted string prefixes, # including the opening quotes. single_quoted = set() triple_quoted = set() for t in _all_string_prefixes(): for u in (t + '"', t + "'"): single_quoted.add(u) for u in (t + '"""', t + "'''"): triple_quoted.add(u) del t, u tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 self.encoding = None def add_whitespace(self, start): row, col = start if row < self.prev_row or row == self.prev_row and col < self.prev_col: raise ValueError("start ({},{}) precedes previous end ({},{})" .format(row, col, self.prev_row, self.prev_col)) row_offset = row - self.prev_row if row_offset: self.tokens.append("\\\n" * row_offset) self.prev_col = 0 col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): it = iter(iterable) indents = [] startline = False for t in it: if len(t) == 2: self.compat(t, it) break tok_type, token, start, end, line = t if tok_type == ENCODING: self.encoding = token continue if tok_type == ENDMARKER: break if tok_type == INDENT: indents.append(token) continue elif tok_type == DEDENT: indents.pop() self.prev_row, self.prev_col = end continue elif tok_type in (NEWLINE, NL): startline = True elif startline and indents: indent = indents[-1] if start[1] >= len(indent): self.tokens.append(indent) self.prev_col = len(indent) startline = False self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): indents = [] toks_append = self.tokens.append startline = token[0] in (NEWLINE, NL) prevstring = False for tok in _itertools.chain([token], iterable): toknum, tokval = tok[:2] if toknum == ENCODING: self.encoding = tokval continue if toknum in (NAME, NUMBER): tokval += ' ' # Insert a space between two consecutive strings if toknum == STRING: if prevstring: tokval = ' ' + tokval prevstring = True else: prevstring = False if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) def untokenize(iterable): """Transform tokens back into Python source code. It returns a bytes object, encoded using the ENCODING token, which is the first token sequence output by tokenize. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited input: # Output bytes will tokenize back to the input t1 = [tok[:2] for tok in tokenize(f.readline)] newcode = untokenize(t1) readline = BytesIO(newcode).readline t2 = [tok[:2] for tok in tokenize(readline)] assert t1 == t2 """ ut = Untokenizer() out = ut.untokenize(iterable) if ut.encoding is not None: out = out.encode(ut.encoding) return out def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if encoding != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] if not blank_re.match(first): return default, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def open(filename): """Open a file in read only mode using the encoding detected by detect_encoding(). """ buffer = _builtin_open(filename, 'rb') try: encoding, lines = detect_encoding(buffer.readline) buffer.seek(0) text = TextIOWrapper(buffer, encoding, line_buffering=True) text.mode = 'r' return text except: buffer.close() raise def tokenize(readline): """ The tokenize() generator requires one argument, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as bytes. Alternatively, readline can be a callable function terminating with StopIteration: readline = open(myfile, 'rb').__next__ # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the physical line. The first token sequence will always be an ENCODING token which tells you which encoding was used to decode the bytes stream. """ encoding, consumed = detect_encoding(readline) empty = _itertools.repeat(b"") rl_gen = _itertools.chain(consumed, iter(readline, b""), empty) return _tokenize(rl_gen.__next__, encoding) def _tokenize(readline, encoding): lnum = parenlev = continued = 0 numchars = '0123456789' contstr, needcont = '', 0 contline = None indents = [0] if encoding is not None: if encoding == "utf-8-sig": # BOM will already have been stripped. encoding = "utf-8" yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') last_line = b'' line = b'' while True: # loop over lines in stream try: # We capture the value of the line variable here because # readline uses the empty string '' to signal end of input, # hence `line` itself will always be overwritten at the end # of this loop. last_line = line line = readline() except StopIteration: line = b'' if encoding is not None: line = line.decode(encoding) lnum += 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield TokenInfo(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column += 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos += 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') yield TokenInfo(COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) pos += len(comment_token) yield TokenInfo(NL, line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("", lnum, pos, line)) indents = indents[:-1] yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = _compile(PseudoToken).match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end if start == end: continue token, initial = line[start:end], line[start] if (initial in numchars or # ordinary number (initial == '.' and token != '.' and token != '...')): yield TokenInfo(NUMBER, token, spos, epos, line) elif initial in '\r\n': if parenlev > 0: yield TokenInfo(NL, token, spos, epos, line) else: yield TokenInfo(NEWLINE, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield TokenInfo(COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = _compile(endpats[token]) endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield TokenInfo(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break # Check up to the first 3 chars of the token to see if # they're in the single_quoted set. If so, they start # a string. # We're using the first 3, because we're looking for # "rb'" (for example) at the start of the token. If # we switch to longer prefixes, this needs to be # adjusted. # Note that initial == token[:1]. # Also note that single quote checking must come after # triple quote checking (above). elif (initial in single_quoted or token[:2] in single_quoted or token[:3] in single_quoted): if token[-1] == '\n': # continued string strstart = (lnum, start) # Again, using the first 3 chars of the # token. This is looking for the matching end # regex for the correct type of quote # character. So it's really looking for # endpats["'"] or endpats['"'], by trying to # skip string prefix characters, if any. endprog = _compile(endpats.get(initial) or endpats.get(token[1]) or endpats.get(token[2])) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield TokenInfo(STRING, token, spos, epos, line) elif initial.isidentifier(): # ordinary name yield TokenInfo(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev += 1 elif initial in ')]}': parenlev -= 1 yield TokenInfo(OP, token, spos, epos, line) else: yield TokenInfo(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos += 1 # Add an implicit NEWLINE if the input doesn't end in one if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"): yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '') for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') def generate_tokens(readline): """Tokenize a source reading Python code as unicode strings. This has the same API as tokenize(), except that it expects the *readline* callable to return str objects instead of bytes. """ return _tokenize(readline, None) def main(): import argparse # Helper error handling routines def perror(message): sys.stderr.write(message) sys.stderr.write('\n') def error(message, filename=None, location=None): if location: args = (filename,) + location + (message,) perror("%s:%d:%d: error: %s" % args) elif filename: perror("%s: error: %s" % (filename, message)) else: perror("error: %s" % message) sys.exit(1) # Parse the arguments and options parser = argparse.ArgumentParser(prog='python -m tokenize') parser.add_argument(dest='filename', nargs='?', metavar='filename.py', help='the file to tokenize; defaults to stdin') parser.add_argument('-e', '--exact', dest='exact', action='store_true', help='display token names using the exact type') args = parser.parse_args() try: # Tokenize the input if args.filename: filename = args.filename with _builtin_open(filename, 'rb') as f: tokens = list(tokenize(f.readline)) else: filename = "" tokens = _tokenize(sys.stdin.readline, None) # Output the tokenization for token in tokens: token_type = token.type if args.exact: token_type = token.exact_type token_range = "%d,%d-%d,%d:" % (token.start + token.end) print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) except IndentationError as err: line, column = err.args[1][1:3] error(err.args[0], filename, (line, column)) except TokenError as err: line, column = err.args[1] error(err.args[0], filename, (line, column)) except SyntaxError as err: error(err, filename) except OSError as err: error(err) except KeyboardInterrupt: print("interrupted\n") except Exception as err: perror("unexpected error: %s" % err) raise def _generate_tokens_from_c_tokenizer(source): """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" import _tokenize as c_tokenizer for info in c_tokenizer.TokenizerIter(source): tok, type, lineno, end_lineno, col_off, end_col_off, line = info yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line) if __name__ == "__main__": main() Erotemic-xdoctest-fac8308/src/xdoctest/checker.py000066400000000000000000000520041505122333300221320ustar00rootroot00000000000000""" Checks for got-vs-want statements A "got-string" is data produced by a doctest that we want to check matches some expected value. A "want-string" is a representation of the output we expect, if the "got-string" is different than the "want-string" the doctest will fail with a :class:`GotWantException`. A want string should come directly after a doctest and should not be prefixed by the three cheverons (``>>> ``). There are two types of data that a doctest could "get" as a "got-string", either the contents of standard out the value of an expression itself. A doctest that uses stdout might look like this .. code:: python >>> print('We expect this exact string') We expect this exact string A doctest that uses a raw expression might look like this .. code:: python >>> def foo(): >>> return 3 >>> foo() 3 In most cases it is best to use stdout to write your got-want tests because it is easier to control strings sent to stdout than it is to control the representation of expression-based "got-strings". """ import re import difflib from xdoctest import utils from xdoctest import constants from xdoctest import directive unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) BLANKLINE_MARKER = '' ELLIPSIS_MARKER = '...' TRAILING_WS = re.compile(r"[ \t]*$", re.UNICODE | re.MULTILINE) _EXCEPTION_RE = re.compile(r""" # Grab the traceback header. Different versions of Python have # said different things on the first traceback line. ^(?P Traceback\ \( (?: most\ recent\ call\ last | innermost\ last ) \) : ) \s* $ # toss trailing whitespace on the header. (?P .*?) # don't blink: absorb stuff until... ^ (?P \w+ .*) # a line *starts* with alphanum. """, re.VERBOSE | re.MULTILINE | re.DOTALL) def check_got_vs_want(want, got_stdout, got_eval=constants.NOT_EVALED, runstate=None): """ Determines to check against either got_stdout or got_eval, and then does the comparison. If both stdout and eval "got" outputs are specified, then the "want" target may match either value. Args: want (str): target to match against got_stdout (str): output from stdout got_eval (str): output from an eval statement. runstate (xdoctest.directive.RuntimeState | None): current state Raises: GotWantException - If the "got" differs from this parts want. """ # If we did not want anything than ignore eval and stdout if got_eval is constants.NOT_EVALED: # if there was no eval, check stdout got = got_stdout flag = check_output(got, want, runstate) else: if not got_stdout: # If there was no stdout then use eval value. try: got = repr(got_eval) except Exception as ex: raise ExtractGotReprException('Error calling repr for {}. Caused by: {!r}'.format(type(got_eval), ex), ex) flag = check_output(got, want, runstate) else: # If there was eval and stdout, defer to stdout # but allow fallback on the eval. got = got_stdout flag = check_output(got, want, runstate) if not flag: # allow eval to fallback and save us, but if it fails, do a # diff with stdout got = repr(got_eval) flag = check_output(got, want, runstate) if not flag: got = got_stdout if not flag: msg = 'got differs with doctest want' exp = GotWantException(msg, got, want) raise exp return flag def _strip_exception_details(msg): """ Args: msg (str): Returns: str: """ # Support for IGNORE_EXCEPTION_DETAIL. # Get rid of everything except the exception name; in particular, drop # the possibly dotted module path (if any) and the exception message (if # any). We assume that a colon is never part of a dotted name, or of an # exception name. # E.g., given # "foo.bar.MyError: la di da" # return "MyError" # Or for "abc.def" or "abc.def:\n" return "def". start, end = 0, len(msg) # The exception name must appear on the first line. i = msg.find("\n") if i >= 0: end = i # retain up to the first colon (if any) i = msg.find(':', 0, end) if i >= 0: end = i # retain just the exception name i = msg.rfind('.', 0, end) if i >= 0: start = i + 1 return msg[start: end] def extract_exc_want(want): """ Args: want (str): the message supplied by the user Returns: str: the matchable exception part Example: extract_exc_want(''' Traceback (most recent call last): bar ''') """ want_ = utils.codeblock(want) m = _EXCEPTION_RE.search(want_) exc_want = m.group('msg') if m else None return exc_want def check_exception(exc_got, want, runstate=None): """ Checks want against an exception Args: exc_got (str): the exception message want (str): target to match against runstate (xdoctest.directive.RuntimeState | None): current state Raises: GotWantException - If the "got" differs from this parts want. Returns: bool: True if got matches want """ exc_want = extract_exc_want(want) if exc_want is None: # Reraise the error if the want message is formatted like an exception raise flag = check_output(exc_got, exc_want, runstate) # print('exc_want = {!r}'.format(exc_want)) # print('exc_got = {!r}'.format(exc_got)) # print('flag = {!r}'.format(flag)) if not flag and runstate['IGNORE_EXCEPTION_DETAIL']: exc_got1 = _strip_exception_details(exc_got) exc_want1 = _strip_exception_details(exc_want) flag = check_output(exc_got1, exc_want1, runstate) if flag: exc_got = exc_got1 exc_want = exc_want1 if not flag: msg = 'exception message is different' ex = GotWantException(msg, exc_got, exc_want) raise ex return flag def check_output(got, want, runstate=None): """ Does the actual comparison between `got` and `want` as long as the check is enabled. Args: got (str): text produced by the test want (str): target to match against runstate (xdoctest.directive.RuntimeState | None): current state Returns: bool: True if got matches want or if the check is disabled """ if not want: # nocover return True if want: # Try default if got == want: return True if runstate is None: runstate = directive.RuntimeState() got, want = normalize(got, want, runstate) return _check_match(got, want, runstate) return False def _check_match(got, want, runstate): """ Does the actual comparison between `got` and `want` Args: got (str): normalized text produced by the test want (str): normalized target to match against runstate (xdoctest.directive.RuntimeState | None): current state Returns: bool: True if got matches want """ if got == want: return True if runstate['ELLIPSIS']: if _ellipsis_match(got, want): return True return False def _ellipsis_match(got, want): r""" The ellipsis matching algorithm taken directly from standard doctest. Worst-case linear-time ellipsis matching. Args: got (str): want (str): Returns: bool: True if the text matches according to the ellipsis rule CommandLine: python -m xdoctest.checker _ellipsis_match Example: >>> _ellipsis_match('aaa', 'aa...aa') False >>> _ellipsis_match('anything', '...') True >>> _ellipsis_match('prefix-anything', 'prefix-...') True >>> _ellipsis_match('suffix-anything', 'prefix-...') False >>> _ellipsis_match('foo', '... foo') True >>> _ellipsis_match('took=3.4s', 'took=...s') True >>> _ellipsis_match('best=3.4s ave=4.5s', 'best=...s ave=...s') True >>> _ellipsis_match('took: 1.16e-05 s\nbest=9.63e-07 s ave=1.002e-06 ± 3e-08 s\n', >>> 'took: ...s\nbest=...s ave=...s\n') True """ if ELLIPSIS_MARKER not in want: return want == got # Find "the real" strings. # ws = want.split(ELLIPSIS_MARKER) # MODIFICATION: the ellipsis consumes all whitespace around it # for compatibility with whitespace normalization. ws = re.split(r'\s*{}\s*'.format(re.escape(ELLIPSIS_MARKER)), want, flags=re.MULTILINE) assert len(ws) >= 2 # Deal with exact matches possibly needed at one or both ends. startpos, endpos = 0, len(got) w = ws[0] if w: # starts with exact match if got.startswith(w): startpos = len(w) del ws[0] else: return False w = ws[-1] if w: # ends with exact match if got.endswith(w): endpos -= len(w) del ws[-1] else: return False if startpos > endpos: # Exact end matches required more characters than we have, as in # _ellipsis_match('aa...aa', 'aaa') return False # For the rest, we only need to find the leftmost non-overlapping # match for each piece. If there's no overall match that way alone, # there's no overall match period. for w in ws: # w may be '' at times, if there are consecutive ellipses, or # due to an ellipsis at the start or end of `want`. That's OK. # Search for an empty string succeeds, and doesn't change startpos. startpos = got.find(w, startpos, endpos) if startpos < 0: return False startpos += len(w) return True def normalize(got, want, runstate=None): r""" Normalizes the got and want string based on the runtime state. Adapted from doctest_nose_plugin.py from the nltk project: https://github.com/nltk/nltk Further extended to also support byte literals. Args: got (str): unnormalized got str. want (str): unnormalized want str. runstate (xdoctest.directive.RuntimeState | None): current state Returns: Tuple[str, str]: The normalized got and want str Example: >>> from xdoctest.checker import * # NOQA >>> want = "...\n(0, 2, {'weight': 1})\n(0, 3, {'weight': 2})" >>> got = "(0, 2, {'weight': 1})\n(0, 3, {'weight': 2})" >>> normalize(got, want) ("(0, 2, {'weight': 1}) (0, 3, {'weight': 2})", "... (0, 2, {'weight': 1}) (0, 3, {'weight': 2})") """ if runstate is None: runstate = directive.RuntimeState() def remove_prefixes(regex, text): return re.sub(regex, r'\1\2', text) def visible_text(lines): # TODO: backspaces # Any lines that end with only a carriage return are erased return [line for line in lines if not line.endswith('\r')] # Remove terminal colors if True: got = utils.strip_ansi(got) want = utils.strip_ansi(want) if True: # normalize python 2/3 byte/unicode prefixes got = remove_prefixes(unicode_literal_re, got) want = remove_prefixes(unicode_literal_re, want) # Note: normalizing away prefixes can cause weird "got" # results to print when there is a got-want mismatch. # For instance, if you get {'b': 22} but you want {'b': 2} # this will cause xdoctest to report that you wanted {'': 2} # because it reports the normalized version of the want message got = remove_prefixes(bytes_literal_re, got) want = remove_prefixes(bytes_literal_re, want) # Replace s if it is being used. if not runstate['DONT_ACCEPT_BLANKLINE']: want = remove_blankline_marker(want) # always remove trailing whitespace got = re.sub(TRAILING_WS, '', got) want = re.sub(TRAILING_WS, '', want) # normalize endling newlines want = want.rstrip() got = got.rstrip() # Always remove invisible text got_lines = got.splitlines(True) want_lines = want.splitlines(True) got_lines = visible_text(got_lines) want_lines = visible_text(want_lines) want = ''.join(want_lines) got = ''.join(got_lines) if runstate['NORMALIZE_WHITESPACE'] or runstate['IGNORE_WHITESPACE']: # all whitespace normalization # treat newlines and all whitespace as a single space got = ' '.join(got.split()) want = ' '.join(want.split()) if runstate['IGNORE_WHITESPACE']: # Completely remove whitespace got = re.sub(r'\s', '', got, flags=re.MULTILINE) want = re.sub(r'\s', '', want, flags=re.MULTILINE) if runstate['NORMALIZE_REPR']: def norm_repr(a, b): # If removing quotes would allow for a match, remove them. if not _check_match(a, b, runstate): for q in ['"', "'"]: if a.startswith(q) and a.endswith(q): if _check_match(a[1:-1], b, runstate): return a[1:-1] return a got = norm_repr(got, want) want = norm_repr(want, got) return got, want class ExtractGotReprException(AssertionError): """ Exception used when we are unable to extract a string "got" """ def __init__(self, msg, orig_ex): """ Args: msg (str): The exception message orig_ex (Exception): The parent exception """ super(ExtractGotReprException, self).__init__(msg) self.orig_ex = orig_ex class GotWantException(AssertionError): """ Exception used when the "got" output of a doctest differs from the expected "want" output. """ def __init__(self, msg, got, want): """ Args: msg (str): The exception message got (str): The unnormalized got str want (str): The unnormalized want str """ super(GotWantException, self).__init__(msg) self.got = got self.want = want def _do_a_fancy_diff(self, runstate=None): # Not unless they asked for a fancy diff. got = self.got want = self.want if runstate is None: runstate = directive.RuntimeState() # ndiff does intraline difference marking, so can be useful even # for 1-line differences. if runstate['REPORT_NDIFF']: return True # The other diff types need at least a few lines to be helpful. if runstate['REPORT_UDIFF'] or runstate['REPORT_CDIFF']: return want.count('\n') > 2 and got.count('\n') > 2 return False def output_difference(self, runstate=None, colored=True): """ Return a string describing the differences between the expected output for a given example (`example`) and the actual output (`got`). The `runstate` contains option flags used to compare `want` and `got`. Args: runstate (xdoctest.directive.RuntimeState | None): current state colored (bool): if the text should be colored Returns: str: formatted difference text Note: This does not check if got matches want, it only outputs the raw differences. Got/Want normalization may make the differences appear more exaggerated than they are. """ got = self.got want = self.want if runstate is None: runstate = directive.RuntimeState() # Don't normalize because it usually removes the newlines runstate_ = runstate.to_dict() # Don't normalize whitespaces in report for better visibility runstate_['NORMALIZE_WHITESPACE'] = False runstate_['IGNORE_WHITESPACE'] = False got, want = normalize(got, want, runstate_) # If s are being used, then replace blank lines # with in the actual output string. # if not runstate['DONT_ACCEPT_BLANKLINE']: # got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) got = utils.ensure_unicode(got) # Check if we should use diff. if self._do_a_fancy_diff(runstate): # Split want & got into lines. want_lines = want.splitlines(True) got_lines = got.splitlines(True) # Use difflib to find their differences. if runstate['REPORT_UDIFF']: diff = difflib.unified_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'unified diff with -expected +actual' elif runstate['REPORT_CDIFF']: diff = difflib.context_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'context diff with expected followed by actual' elif runstate['REPORT_NDIFF']: # TODO: Is there a way to make Differ ignore whitespace if that # runtime directive is specified? engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) diff = list(engine.compare(want_lines, got_lines)) kind = 'ndiff with -expected +actual' else: raise ValueError('Invalid difflib option') # Remove trailing whitespace on diff output. diff = [line.rstrip() + '\n' for line in diff] diff_text = ''.join(diff) if colored: diff_text = utils.highlight_code(diff_text, lexer_name='diff') text = 'Differences (%s):\n' % kind + utils.indent(diff_text) else: # If we're not using diff, then simply list the expected # output followed by the actual output. if want and got: if colored: got = utils.color_text(got, 'red') want = utils.color_text(want, 'red') text = 'Expected:\n{}\nGot:\n{}'.format( utils.indent(self.want), utils.indent(self.got)) elif want: if colored: got = utils.color_text(got, 'red') want = utils.color_text(want, 'red') text = 'Expected:\n{}\nGot nothing\n'.format(utils.indent(want)) elif got: # nocover raise AssertionError('impossible state') text = 'Expected nothing\nGot:\n{}'.format(utils.indent(got)) else: # nocover raise AssertionError('impossible state') text = 'Expected nothing\nGot nothing\n' return text def output_repr_difference(self, runstate=None): """ Constructs a repr difference with minimal normalization. Args: runstate (xdoctest.directive.RuntimeState | None): current state Returns: str: formatted repr difference text """ minimal_got = self.got.rstrip() minimal_want = self.want.rstrip() if runstate is None: runstate = directive.RuntimeState() # Don't normalize because it usually removes the newlines runstate_ = runstate.to_dict() if not runstate_['DONT_ACCEPT_BLANKLINE']: minimal_want = remove_blankline_marker(minimal_want) lines = [ ('Repr Difference:'), # TODO: get a semi-normalized output before showing repr? (' got = {!r}'.format(minimal_got)), (' want = {!r}'.format(minimal_want)), ] return '\n'.join(lines) def remove_blankline_marker(text): r""" Args: text (str): input text Returns: str: output text Example: >>> text1 = 'foo\n{}\nbar'.format(BLANKLINE_MARKER) >>> text2 = '{}\nbar'.format(BLANKLINE_MARKER) >>> text4 = 'foo\n{}'.format(BLANKLINE_MARKER) >>> text3 = '{}'.format(BLANKLINE_MARKER) >>> text5 = text1 + text1 + text1 >>> assert BLANKLINE_MARKER not in remove_blankline_marker(text1) >>> assert BLANKLINE_MARKER not in remove_blankline_marker(text2) >>> assert BLANKLINE_MARKER not in remove_blankline_marker(text3) >>> assert BLANKLINE_MARKER not in remove_blankline_marker(text4) >>> assert BLANKLINE_MARKER not in remove_blankline_marker(text5) """ pos_lb = '(?<=\n)' # positive lookbehind blankline_pattern = '|'.join([ '{pos_lb}{marker}\n', '{marker}\n', '\n{marker}', '{marker}']).format( marker=BLANKLINE_MARKER, pos_lb=pos_lb) # blankline_pattern = r'(?<=\n)[ ]*{}\n?'.format(re.escape(BLANKLINE_MARKER)) new_text = re.sub(blankline_pattern, '\n', text, flags=re.MULTILINE) return new_text if __name__ == '__main__': """ CommandLine: python -m xdoctest.checker all """ import xdoctest as xdoc xdoc.doctest_module() Erotemic-xdoctest-fac8308/src/xdoctest/checker.pyi000066400000000000000000000030511505122333300223010ustar00rootroot00000000000000import xdoctest from typing import Tuple from _typeshed import Incomplete unicode_literal_re: Incomplete bytes_literal_re: Incomplete BLANKLINE_MARKER: str ELLIPSIS_MARKER: str TRAILING_WS: Incomplete def check_got_vs_want(want: str, got_stdout: str, got_eval: str = ..., runstate: xdoctest.directive.RuntimeState | None = None): ... def extract_exc_want(want: str) -> str: ... def check_exception( exc_got: str, want: str, runstate: xdoctest.directive.RuntimeState | None = None) -> bool: ... def check_output( got: str, want: str, runstate: xdoctest.directive.RuntimeState | None = None) -> bool: ... def normalize( got: str, want: str, runstate: xdoctest.directive.RuntimeState | None = None ) -> Tuple[str, str]: ... class ExtractGotReprException(AssertionError): orig_ex: Exception def __init__(self, msg: str, orig_ex: Exception) -> None: ... class GotWantException(AssertionError): got: str want: str def __init__(self, msg: str, got: str, want: str) -> None: ... def output_difference(self, runstate: xdoctest.directive.RuntimeState | None = None, colored: bool = True) -> str: ... def output_repr_difference( self, runstate: xdoctest.directive.RuntimeState | None = None) -> str: ... def remove_blankline_marker(text: str) -> str: ... Erotemic-xdoctest-fac8308/src/xdoctest/constants.py000066400000000000000000000030151505122333300225400ustar00rootroot00000000000000""" Defines sentinel values for internal xdoctest usage """ # Create the most singleton object ever to avoid reload issues # this is based on ubelt.NoParam, which has more docs on how this works class _NOT_EVAL_TYPE: """ This is a singleton object used as a sentinel value. The value of :data:`NoParam` is robust to reloading, pickling, and copying. See [SO_41048643]_ for more details. References: .. [SO_41048643]: http://stackoverflow.com/questions/41048643/a-second-none Example: >>> from xdoctest.constants import NOT_EVALED, _NOT_EVAL_TYPE # NOQA >>> import copy >>> assert not NOT_EVALED >>> assert str(NOT_EVALED) == '' >>> assert repr(NOT_EVALED) == '' >>> assert NOT_EVALED(...) is None >>> assert copy.copy(NOT_EVALED) is NOT_EVALED >>> assert copy.deepcopy(NOT_EVALED) is NOT_EVALED >>> assert _NOT_EVAL_TYPE() is NOT_EVALED """ def __new__(cls): return NOT_EVALED def __reduce__(self): return (_NOT_EVAL_TYPE, ()) def __copy__(self): return NOT_EVALED def __deepcopy__(self, memo): return NOT_EVALED def __call__(self, default): pass def __str__(cls): return '' def __repr__(cls): return '' def __bool__(self): return False __nonzero__ = __bool__ try: NOT_EVALED # type: ignore except NameError: NOT_EVALED = object.__new__(_NOT_EVAL_TYPE) # type: _NOT_EVAL_TYPE Erotemic-xdoctest-fac8308/src/xdoctest/constants.pyi000066400000000000000000000005101505122333300227060ustar00rootroot00000000000000class _NOT_EVAL_TYPE: def __new__(cls): ... def __reduce__(self): ... def __copy__(self): ... def __deepcopy__(self, memo): ... def __call__(self, default) -> None: ... def __bool__(self): ... __nonzero__ = __bool__ NOT_EVALED: _NOT_EVAL_TYPE Erotemic-xdoctest-fac8308/src/xdoctest/core.py000066400000000000000000000617331505122333300214670ustar00rootroot00000000000000""" Core methods used by xdoctest runner and plugin code to statically extract doctests from a module or package. The following is a glossary of terms and jargon used in this repo. * callname - the name of a callable function, method, class etc... e.g. ``myfunc``, ``MyClass``, or ``MyClass.some_method``. * got / want - a test that produces stdout or a value to check. Whatever is produced is what you "got" and whatever is expected is what you "want". See :mod:`xdoctest.checker` for more details. * directives - special in-doctest comments that change the behavior of the doctests at runtime. See :mod:`xdoctest.directive` for more details. * chevrons - the three cheverons (``>>> ``) or right angle brackets are the standard prefix for a doctest, also referred to as a PS1 line in the parser. * zero-args - a function that can be called without any arguments. * freeform style - This is the term used to refer to a doctest that could be anywhere in the docstring. The alternative are structured doctests where they are only expected in known positions like in "Example blocks" for google and numpy style docstrings. * TODO - complete this list (Make an issue or PR if there is any term you don't immediately understand!). """ import textwrap import warnings import itertools as it import types from os.path import exists from fnmatch import fnmatch from xdoctest import dynamic_analysis from xdoctest import static_analysis from xdoctest import parser from xdoctest import exceptions from xdoctest import doctest_example from xdoctest import utils from xdoctest.docstr import docscrape_google from xdoctest.utils import util_import from xdoctest import global_state DOCTEST_STYLES = [ 'freeform', 'google', 'auto', # 'numpy', # TODO ] __docstubs__ = """ import xdoctest.doctest_example """ def parse_freeform_docstr_examples(docstr, callname=None, modpath=None, lineno=1, fpath=None, asone=True): r""" Finds free-form doctests in a docstring. This is similar to the original doctests because these tests do not requires a google/numpy style header. Some care is taken to avoid enabling tests that look like disabled google doctests or scripts. Args: docstr (str): an extracted docstring callname (str | None): the name of the callable (e.g. function, class, or method) that this docstring belongs to. modpath (str | PathLike | None): original module the docstring is from lineno (int): the line number (starting from 1) of the docstring. i.e. if you were to go to this line number in the source file the starting quotes of the docstr would be on this line. Defaults to 1. fpath (str | PathLike | None): the file that the docstring is from (if the file was not a module, needed for backwards compatibility) asone (bool): if False doctests are broken into multiple examples based on spacing, otherwise they are executed as a single unit. Defaults to True. Yields: xdoctest.doctest_example.DocTest : doctest object Raises: xdoctest.exceptions.DoctestParseError: if an error occurs in parsing CommandLine: python -m xdoctest.core parse_freeform_docstr_examples Example: >>> # TODO: move this to unit tests and make the doctest simpler >>> from xdoctest import core >>> from xdoctest import utils >>> docstr = utils.codeblock( >>> ''' >>> freeform >>> >>> doctest >>> >>> hasmultilines >>> whoppie >>> >>> 'but this is the same doctest' >>> >>> >>> secondone >>> >>> Script: >>> >>> 'special case, dont parse me' >>> >>> DisableDoctest: >>> >>> 'special case, dont parse me' >>> want >>> >>> AnythingElse: >>> >>> 'general case, parse me' >>> want >>> ''') >>> examples = list(parse_freeform_docstr_examples(docstr, asone=True)) >>> assert len(examples) == 1 >>> examples = list(parse_freeform_docstr_examples(docstr, asone=False)) >>> assert len(examples) == 3 """ def doctest_from_parts(parts, num, curr_offset): # FIXME: this will cause line numbers to become misaligned nested = [ p.orig_lines if p.want is None else p.orig_lines + p.want.splitlines() for p in parts ] docsrc = '\n'.join(list(it.chain.from_iterable(nested))) docsrc = textwrap.dedent(docsrc) example = doctest_example.DocTest(docsrc, modpath=modpath, callname=callname, num=num, lineno=lineno + curr_offset, fpath=fpath) # rebase the offsets relative to the test lineno (ie start at 0) unoffset = parts[0].line_offset for p in parts: p.line_offset -= unoffset # We've already parsed the parts, so we dont need to do it again example._parts = parts return example if global_state.DEBUG_CORE: # nocover print('Parsing docstring for callname={} in modpath={}'.format( callname, modpath)) respect_google_headers = True if respect_google_headers: # pragma: nobranch # TODO: make configurable # When in freeform mode we still try to respect google doctest patterns # that prevent a test from being run. special_skip_patterns = [ 'DisableDoctest:', 'DisableExample:', 'SkipDoctest:', 'Ignore:', 'Script:', 'Benchmark:', 'Sympy:', ] else: special_skip_patterns = [] # nocover special_skip_patterns_ = tuple([ p.lower() for p in special_skip_patterns ]) def _start_ignoring(prev): return (special_skip_patterns_ and isinstance(prev, str) and prev.strip().lower().endswith(special_skip_patterns_)) # parse into doctest and plaintext parts info = dict(callname=callname, modpath=modpath, lineno=lineno, fpath=fpath) all_parts = list(parser.DoctestParser().parse(docstr, info)) curr_parts = [] curr_offset = 0 num = 0 prev_part = None ignoring = False for part in all_parts: if isinstance(part, str): # Part is a plaintext if asone: # Lump all doctest parts into one example if not curr_parts: curr_offset += part.count('\n') + 1 else: # nocover if curr_parts: # Group the current parts into a single doctest example = doctest_from_parts(curr_parts, num, curr_offset) yield example # Initialize empty parts for a new doctest curr_offset += sum(p.n_lines for p in curr_parts) num += 1 curr_parts = [] curr_offset += part.count('\n') + 1 # stop ignoring ignoring = False else: # If the previous part was text-based, and matches a special skip # ignore pattern then ignore all tests until a new doctest block # begins. (different doctest blocks are separated by plaintext) if ignoring or _start_ignoring(prev_part): ignoring = True if asone: if not curr_parts: curr_offset += part.n_lines else: curr_offset += part.n_lines else: # Append part to the current parts curr_parts.append(part) prev_part = part if curr_parts: # Group remaining parts into the final doctest example = doctest_from_parts(curr_parts, num, curr_offset) yield example def parse_google_docstr_examples(docstr, callname=None, modpath=None, lineno=1, fpath=None, eager_parse=True): """ Parses Google-style doctests from a docstr and generates example objects Args: docstr (str): an extracted docstring callname (str | None): the name of the callable (e.g. function, class, or method) that this docstring belongs to. modpath (str | PathLike | None): original module the docstring is from lineno (int): the line number (starting from 1) of the docstring. i.e. if you were to go to this line number in the source file the starting quotes of the docstr would be on this line. Defaults to 1. fpath (str | PathLike | None): the file that the docstring is from (if the file was not a module, needed for backwards compatibility) eager_parse (bool): if True eagerly evaluate the parser inside the google example blocks. Defaults to True. Yields: xdoctest.doctest_example.DocTest : doctest object Raises: xdoctest.exceptions.MalformedDocstr: if an error occurs in finding google blocks xdoctest.exceptions.DoctestParseError: if an error occurs in parsing """ try: blocks = docscrape_google.split_google_docblocks(docstr) except exceptions.MalformedDocstr: print('ERROR PARSING {} GOOGLE BLOCKS IN {} ON line {}'.format( callname, modpath, lineno)) print('Did you forget to make a docstr with newlines raw?') raise example_blocks = [] example_tags = ('Example', 'Doctest', 'Script', 'Benchmark') for type, block in blocks: if type.startswith(example_tags): example_blocks.append((type, block)) for num, (type, (docsrc, offset)) in enumerate(example_blocks): # Add one because offset indicates the position of the block-label # and the body of the block always starts on the next line. label_lineno = lineno + offset body_lineno = label_lineno + 1 example = doctest_example.DocTest(docsrc, modpath, callname, num, lineno=body_lineno, fpath=fpath, block_type=type) if eager_parse: # parse on the fly to be consistent with freeform? example._parse() yield example def parse_auto_docstr_examples(docstr, *args, **kwargs): """ First try to parse google style, but if no tests are found use freeform style. """ if global_state.DEBUG_CORE: # nocover print('Automatic style is trying google parsing') n_found = 0 try: for example in parse_google_docstr_examples(docstr, *args, **kwargs): n_found += 1 yield example except Exception: if n_found > 0: raise # no google style tests were found, parse in freeform if n_found == 0: if global_state.DEBUG_CORE: # nocover print('Automatic style is trying freeform parsing') for example in parse_freeform_docstr_examples(docstr, *args, **kwargs): yield example def parse_docstr_examples(docstr, callname=None, modpath=None, lineno=1, style='auto', fpath=None, parser_kw=None): """ Parses doctests from a docstr and generates example objects. The style influences which tests are found. Args: docstr (str): a previously extracted docstring callname (str | None): the name of the callable (e.g. function, class, or method) that this docstring belongs to. modpath (str | PathLike | None): original module the docstring is from lineno (int): the line number (starting from 1) of the docstring. i.e. if you were to go to this line number in the source file the starting quotes of the docstr would be on this line. Defaults to 1. style (str): expected doctest style, which can be "google", "freeform", or "auto". Defaults to 'auto'. fpath (str | PathLike | None): the file that the docstring is from (if the file was not a module, needed for backwards compatibility) parser_kw (dict | None): passed to the parser as keyword args Yields: xdoctest.doctest_example.DocTest : parsed example CommandLine: python -m xdoctest.core parse_docstr_examples Example: >>> from xdoctest.core import * >>> from xdoctest import utils >>> docstr = utils.codeblock( ... ''' ... >>> 1 + 1 # xdoctest: +SKIP ... 2 ... >>> 2 + 2 ... 4 ... ''') >>> examples = list(parse_docstr_examples(docstr, 'name', fpath='foo.txt', style='freeform')) >>> print(len(examples)) 1 >>> examples = list(parse_docstr_examples(docstr, fpath='foo.txt')) """ if global_state.DEBUG_CORE: # nocover print('Parsing docstring examples for ' 'callname={} in modpath={}'.format(callname, modpath)) if style == 'freeform': parser = parse_freeform_docstr_examples elif style == 'google': parser = parse_google_docstr_examples elif style == 'auto': parser = parse_auto_docstr_examples # TODO: epdoc # TODO: # elif style == 'numpy': # parser = parse_numpy_docstr_examples else: raise KeyError('Unknown style={}. Valid styles are {}'.format( style, DOCTEST_STYLES)) if global_state.DEBUG_CORE: # nocover print('parser = {!r}'.format(parser)) n_parsed = 0 try: if parser_kw is None: parser_kw = {} for example in parser(docstr, callname=callname, modpath=modpath, fpath=fpath, lineno=lineno, **parser_kw): n_parsed += 1 yield example except Exception as ex: if global_state.DEBUG_CORE: # nocover print('Caught an error when parsing') msg = ('Cannot scrape callname={} in modpath={} line={}.\n' 'Caused by: {}\n') # raise msg = msg.format(callname, modpath, lineno, repr(ex)) if isinstance(ex, exceptions.DoctestParseError): # TODO: Can we print a nicer syntax error here? msg += '{}\n'.format(ex.string) msg += 'Original Error: {}\n'.format(repr(ex.orig_ex)) if isinstance(ex.orig_ex, SyntaxError): extra_help = '' if ex.orig_ex.text: extra_help += utils.ensure_unicode(ex.orig_ex.text) if ex.orig_ex.offset is not None: extra_help += ' ' * (ex.orig_ex.offset - 1) + '^' if extra_help: msg += '\n' + extra_help # Always warn when something bad is happening. # However, dont error if the docstr simply has bad syntax print('msg = {}'.format(msg)) warnings.warn(msg) if isinstance(ex, exceptions.MalformedDocstr): pass elif isinstance(ex, exceptions.DoctestParseError): pass else: raise if global_state.DEBUG_CORE: # nocover print('Finished parsing {} examples'.format(n_parsed)) def _rectify_to_modpath(modpath_or_name): """ if modpath_or_name is a name, statically converts it to a path """ if isinstance(modpath_or_name, types.ModuleType): raise TypeError('Expected a static module but got a dynamic one') # NOTE: running modname_to_modpath is a bottleneck in pytest collect Using # a quick heuristic to bypass it: if the module name has '/' in it, is is # very likely a path. if '/' in modpath_or_name or '\\' in modpath_or_name: modpath = None else: modpath = util_import.modname_to_modpath(modpath_or_name) if modpath is None: if exists(modpath_or_name): modpath = modpath_or_name else: raise ValueError('Cannot find module={}'.format(modpath_or_name)) return modpath def package_calldefs(pkg_identifier, exclude=[], ignore_syntax_errors=True, analysis='auto'): """ Statically generates all callable definitions in a module or package Args: pkg_identifier (str | ModuleType): path to or name of the module to be tested (or the live module itself, which is not recommended) exclude (List[str]): glob-patterns of file names to exclude ignore_syntax_errors (bool): if False raise an error when syntax errors occur in a doctest Defaults to True. analysis (str): if 'static', only static analysis is used to parse call definitions. If 'auto', uses dynamic analysis for compiled python extensions, but static analysis elsewhere, if 'dynamic', then dynamic analysis is used to parse all calldefs. Defaults to 'auto'. Yields: Tuple[Dict[str, xdoctest.static_analysis.CallDefNode], str | ModuleType] - * item[0]: the mapping of callnames-to-calldefs * item[1]: the path to the file containing the doctest (usually a module) or the module itself Example: >>> pkg_identifier = 'xdoctest.core' >>> testables = list(package_calldefs(pkg_identifier)) >>> assert len(testables) == 1 >>> calldefs, modpath = testables[0] >>> assert util_import.modpath_to_modname(modpath) == pkg_identifier >>> assert 'package_calldefs' in calldefs """ if global_state.DEBUG_CORE: # nocover print('Find package calldefs: pkg_identifier = {!r}'.format(pkg_identifier)) if isinstance(pkg_identifier, types.ModuleType): # Case where we are forced to use a live module identifiers = [pkg_identifier] else: pkgpath = _rectify_to_modpath(pkg_identifier) identifiers = list(static_analysis.package_modpaths( pkgpath, with_pkg=True, with_libs=True)) for module_identifier in identifiers: if isinstance(module_identifier, str): modpath = module_identifier modname = util_import.modpath_to_modname(modpath) if any(fnmatch(modname, pat) for pat in exclude): continue if not exists(modpath): warnings.warn( 'Module {} does not exist. ' 'Is it an old pyc file?'.format(modname)) continue try: calldefs = parse_calldefs(module_identifier, analysis=analysis) if calldefs is not None: yield calldefs, module_identifier except SyntaxError as ex: # Handle error due to the actual code containing errors msg = 'Cannot parse module={}.\nCaused by: {}' msg = msg.format(module_identifier, ex) if ignore_syntax_errors: warnings.warn(msg) # real code or docstr contained errors else: raise SyntaxError(msg) def parse_calldefs(module_identifier, analysis='auto'): """ Parse calldefs from a single module using either static or dynamic analysis. Args: module_identifier (str | ModuleType): path to or name of the module to be tested (or the live module itself, which is not recommended) analysis (str, default='auto'): if 'static', only static analysis is used to parse call definitions. If 'auto', uses dynamic analysis for compiled python extensions, but static analysis elsewhere, if 'dynamic', then dynamic analysis is used to parse all calldefs. Returns: Dict[str, xdoctest.static_analysis.CallDefNode]: the mapping of callnames-to-calldefs within the module. """ if isinstance(module_identifier, types.ModuleType): # identifier is a live module need_dynamic = True else: # identifier is a path to a module modpath = module_identifier # Certain files (notebooks and c-extensions) require dynamic analysis need_dynamic = modpath.endswith( static_analysis._platform_pylib_exts()) if modpath.endswith('.ipynb'): need_dynamic = True if analysis == 'static': if need_dynamic: # Some modules can only be parsed dynamically raise Exception(( 'Static analysis required, but {} requires ' 'dynamic analysis').format(module_identifier)) do_dynamic = False elif analysis == 'dynamic': do_dynamic = True elif analysis == 'auto': do_dynamic = need_dynamic else: raise KeyError(analysis) if global_state.DEBUG_CORE: # nocover print('About to parse calldefs with do_dynamic={}'.format(do_dynamic)) calldefs = None if do_dynamic: try: calldefs = dynamic_analysis.parse_dynamic_calldefs(module_identifier) except (ImportError, RuntimeError) as ex: # Some modules are just c modules msg = 'Cannot dynamically parse module={}.\nCaused by: {!r} {}' msg = msg.format(module_identifier, type(ex), ex) warnings.warn(msg) except Exception as ex: msg = 'Cannot dynamically parse module={}.\nCaused by: {!r} {}' msg = msg.format(module_identifier, type(ex), ex) warnings.warn(msg) raise else: calldefs = static_analysis.parse_static_calldefs(fpath=module_identifier) if global_state.DEBUG_CORE: # nocover print('Found {} calldefs'.format(len(calldefs))) return calldefs def parse_doctestables(module_identifier, exclude=[], style='auto', ignore_syntax_errors=True, parser_kw={}, analysis='auto'): """ Parses all doctests within top-level callables of a module and generates example objects. The style influences which tests are found. Args: module_identifier (str | PathLike | ModuleType): path or name of a module or a module itself (we prefer a path) exclude (List[str]): glob-patterns of file names to exclude style (str): expected doctest style (e.g. google, freeform, auto) ignore_syntax_errors (bool, default=True): if False raise an error when syntax errors parser_kw: extra args passed to the parser analysis (str, default='auto'): if 'static', only static analysis is used to parse call definitions. If 'auto', uses dynamic analysis for compiled python extensions, but static analysis elsewhere, if 'dynamic', then dynamic analysis is used to parse all calldefs. Yields: xdoctest.doctest_example.DocTest : parsed doctest example objects CommandLine: python -m xdoctest.core parse_doctestables Example: >>> module_identifier = 'xdoctest.core' >>> testables = list(parse_doctestables(module_identifier)) >>> this_example = None >>> for example in testables: >>> # print(example) >>> if example.callname == 'parse_doctestables': >>> this_example = example >>> assert this_example is not None >>> assert this_example.callname == 'parse_doctestables' Example: >>> from xdoctest import utils >>> docstr = utils.codeblock( ... ''' ... >>> 1 + 1 # xdoctest: +SKIP ... 2 ... >>> 2 + 2 ... 4 ... ''') >>> temp = utils.TempDoctest(docstr, 'test_modfile') >>> modpath = temp.modpath >>> examples = list(parse_doctestables(modpath, style='freeform')) >>> print(len(examples)) 1 """ if style not in DOCTEST_STYLES: raise KeyError('Unknown style={}. Valid styles are {}'.format( style, DOCTEST_STYLES)) # Statically parse modules and their doctestable callables in a package for calldefs, modpath in package_calldefs(module_identifier, exclude, ignore_syntax_errors, analysis=analysis): for callname, calldef in calldefs.items(): docstr = calldef.docstr if calldef.docstr is not None: lineno = calldef.doclineno example_gen = parse_docstr_examples( docstr, callname=callname, modpath=modpath, lineno=lineno, style=style, parser_kw=parser_kw) if global_state.DEBUG_CORE: # nocover for example in example_gen: print(' * Yield example={}'.format(example)) yield example else: for example in example_gen: yield example if __name__ == '__main__': """ CommandLine: python -m xdoctest.core all """ import xdoctest as xdoc xdoc.doctest_module() Erotemic-xdoctest-fac8308/src/xdoctest/core.pyi000066400000000000000000000037171505122333300216360ustar00rootroot00000000000000from os import PathLike import xdoctest from types import ModuleType from typing import List from typing import Dict from _typeshed import Incomplete from collections.abc import Generator from typing import Any import xdoctest.doctest_example DOCTEST_STYLES: Incomplete __docstubs__: str def parse_freeform_docstr_examples( docstr: str, callname: str | None = None, modpath: str | PathLike | None = None, lineno: int = 1, fpath: str | PathLike | None = None, asone: bool = True ) -> Generator[xdoctest.doctest_example.DocTest, None, Any]: ... def parse_google_docstr_examples( docstr: str, callname: str | None = None, modpath: str | PathLike | None = None, lineno: int = 1, fpath: str | PathLike | None = None, eager_parse: bool = True ) -> Generator[xdoctest.doctest_example.DocTest, None, None]: ... def parse_auto_docstr_examples(docstr, *args, **kwargs) -> Generator[Any, None, None]: ... def parse_docstr_examples( docstr: str, callname: str | None = None, modpath: str | PathLike | None = None, lineno: int = 1, style: str = 'auto', fpath: str | PathLike | None = None, parser_kw: dict | None = None ) -> Generator[xdoctest.doctest_example.DocTest, None, None]: ... def package_calldefs(pkg_identifier: str | ModuleType, exclude: List[str] = ..., ignore_syntax_errors: bool = True, analysis: str = 'auto') -> Generator[None, None, None]: ... def parse_calldefs( module_identifier: str | ModuleType, analysis: str = 'auto' ) -> Dict[str, xdoctest.static_analysis.CallDefNode]: ... def parse_doctestables( module_identifier: str | PathLike | ModuleType, exclude: List[str] = ..., style: str = 'auto', ignore_syntax_errors: bool = True, parser_kw=..., analysis: str = 'auto' ) -> Generator[xdoctest.doctest_example.DocTest, None, None]: ... Erotemic-xdoctest-fac8308/src/xdoctest/demo.py000066400000000000000000000057101505122333300214540ustar00rootroot00000000000000""" This file contains quick demonstrations of how to use xdoctest CommandLine: xdoctest -m xdoctest.demo xdoctest -m xdoctest.demo --verbose 0 xdoctest -m xdoctest.demo --silent xdoctest -m xdoctest.demo --quiet """ def myfunc(): """ Demonstrates how to write a doctest. Prefix with ``>>>`` and ideally place in an `Example:` block. You can also change Example, Ignore will Prefix with ``>>>`` and ideally place in an `Example:` block. CommandLine: # it would be nice if sphinx.ext.napoleon could handle this xdoctest -m xdoctest.demo myfunc Example: >>> result = myfunc() >>> assert result == 123 Ignore: >>> # it would be nice if sphinx.ext.napoleon could ignore this >>> print('this test is not run') """ return 123 class MyClass: """ Example: >>> self = MyClass.demo() >>> print('self.data = {!r}'.format(self.data)) """ def __init__(self, *args, **kw): """ Example: >>> # xdoctest: +REQUIRES(--fail) >>> raise Exception """ self.data = (args, kw) @classmethod def demo(cls, **kw): """ CommandLine: xdoctest -m xdoctest.demo MyClass.demo xdoctest -m xdoctest.demo MyClass.demo --say Example: >>> print('starting my doctest') >>> self = MyClass.demo(demo='thats my demo') >>> # xdoc: +REQUIRES(--say) >>> print('self.data = {!r}'.format(self.data)) """ return MyClass(['spam'] * 42, ['eggs'], **kw) @staticmethod def always_fails(): """ CommandLine: xdoctest -m xdoctest.demo MyClass.always_fails xdoctest -m xdoctest.demo MyClass.always_fails --fail xdoctest -m xdoctest.demo MyClass.always_fails --fail --really xdoctest -m xdoctest.demo MyClass.always_fails:0 --fail xdoctest -m xdoctest.demo MyClass.always_fails:1 --fail xdoctest -m xdoctest.demo MyClass.always_fails:2 --fail xdoctest -m xdoctest.demo MyClass.always_fails:3 --fail --really Example: >>> # xdoctest: +REQUIRES(--fail) >>> raise Exception('doctest always fails') Example: >>> # xdoctest: +REQUIRES(--fail) >>> MyClass.demo().always_fails() Example: >>> # xdoctest: +REQUIRES(--fail) >>> print('there is no way to fail') There are so many ways to fail Example: >>> # xdoctest: +REQUIRES(--fail) >>> # xdoctest: +REQUIRES(--really) >>> raise Exception # xdoctest: +SKIP >>> print('did you know') # xdoctest: +IGNORE_WANT directives are useful >>> print('match this') ... >>> print('match this') # xdoctest: -ELLIPSIS ... """ raise Exception('func always fails') Erotemic-xdoctest-fac8308/src/xdoctest/demo.pyi000066400000000000000000000004171505122333300216240ustar00rootroot00000000000000from _typeshed import Incomplete def myfunc(): ... class MyClass: data: Incomplete def __init__(self, *args, **kw) -> None: ... @classmethod def demo(cls, **kw): ... @staticmethod def always_fails() -> None: ... Erotemic-xdoctest-fac8308/src/xdoctest/directive.py000066400000000000000000001002611505122333300225030ustar00rootroot00000000000000""" Directives special comments that influence the runtime behavior of doctests. There are two types of directives: block and inline Block directives are specified on their own line and influence the behavior of multiple lines of code. Inline directives are specified after in the same line of code and only influence that line / repl part. Basic Directives ---------------- Basic directives correspond directly to an xdoctest runtime state attribute. These can be modified by directly using the xdoctest directive syntax. The following documents all supported basic directives. The basic directives and their defaults are as follows: * ``DONT_ACCEPT_BLANKLINE``: False, * ``ELLIPSIS``: True, * ``IGNORE_WHITESPACE``: False, * ``IGNORE_EXCEPTION_DETAIL``: False, * ``NORMALIZE_WHITESPACE``: True, * ``IGNORE_WANT``: False, * ``NORMALIZE_REPR``: True, * ``REPORT_CDIFF``: False, * ``REPORT_NDIFF``: False, * ``REPORT_UDIFF``: True, * ``ASYNC``: False, * ``SKIP``: False Use ``-`` to disable a directive that is enabled by default, e.g. ``# xdoctest: -ELLIPSIS``, or use ``+`` to enable a directive that is disabled by default, e.g. ``# xdoctest +SKIP``. Advanced Directives ------------------- Advanced directives may take arguments, be conditional, or modify the runtime state in complex ways. For instance, whereas most directives modify a boolean value in the runtime state, the advanced ``REQUIRES`` directive either adds or removes a value from a ``set`` of unmet requirements. Doctests will only run if there are no unmet requirements. Currently the only advanced directive is ``REQUIRES(.)``. Multiple arguments may be specified, by separating them with commas. The currently available arguments allow you to condition on: * Special operating system / python implementation / python version tags, via: ``WIN32``, ``LINUX``, ``DARWIN``, ``POSIX``, ``NT``, ``JAVA``, ``CPYTHON``, ``IRONPYTHON``, ``JYTHON``, ``PYPY``, ``PY2``, ``PY3``. (e.g. ``# xdoctest +REQUIRES(WIN32)``) * Command line flags, via: ``--``, (e.g. ``# xdoctest +REQUIRES(--verbose)``) * If a python module is installed, via: ``module:``, (e.g. ``# xdoctest +REQUIRES(module:numpy)``) * Environment variables, via: ``env:==``, (e.g. ``# xdoctest +REQUIRES(env:MYENVIRON==1)``) TODO ---- - [ ] Directive for Python version: e.g. xdoctest: +REQUIRES(Python>=3.7) - [ ] Directive for module version: e.g. xdoctest: +REQUIRES(module:rich>=1.0) - [ ] Customize directive. - [ ] Add SKIPIF directive Customized Requirements Design: * Allow user to specify a customized requirement on the CLI or environ. e.g. XDOCTEST_CUSTOM_MY_REQUIRE="import torch; torch.cuda.is_available()" Then xdoctest: +REQUIRES(custom:MY_REQUIRE) would invoke it and enable the missing requirement if that snippet ended with a truthy or falsy value CommandLine: python -m xdoctest.directive __doc__ The following example shows how the ``+SKIP`` directives may be used to bypass certain places in the code. Example: >>> # An inline directive appears on the same line as a command and >>> # only applies to the current line. >>> raise AssertionError('this will not be run (a)') # xdoctest: +SKIP >>> print('This line will print: (A)') >>> print('This line will print: (B)') >>> # However, if a directive appears on its own line, then it applies >>> # too all subsequent lines. >>> # xdoctest: +SKIP() >>> raise AssertionError('this will not be run (b)') >>> print('This line will not print: (A)') >>> # Note, that SKIP is simply a state and can be disabled to allow >>> # the program to continue executing. >>> # xdoctest: -SKIP >>> print('This line will print: (C)') >>> print('This line will print: (D)') >>> # This applies to inline directives as well >>> # xdoctest: +SKIP("an assertion would occur") >>> raise AssertionError('this will not be run (c)') >>> print('This line will print: (E)') # xdoctest: -SKIP >>> raise AssertionError('this will not be run (d)') >>> # xdoctest: -SKIP("a reason can be given as an argument") >>> print('This line will print: (F)') This next examples illustrates how to use the advanced ``+REQUIRES()`` directive. Note, the REQUIRES and SKIP states are independent. Example: >>> import sys >>> plat = sys.platform >>> count = 0 >>> # xdoctest: +REQUIRES(WIN32) >>> assert plat.startswith('win32'), 'this only runs on windows' >>> count += 1 >>> # xdoctest: -REQUIRES(WIN32) >>> # xdoctest: +REQUIRES(LINUX) >>> assert plat.startswith('linux'), 'this only runs on linux' >>> count += 1 >>> # xdoctest: -REQUIRES(LINUX) >>> # xdoctest: +REQUIRES(DARWIN) >>> assert plat.startswith('darwin'), 'this only runs on osx' >>> count += 1 >>> # xdoctest: -REQUIRES(DARWIN) >>> print(count) >>> import sys >>> if any(plat.startswith(n) for n in {'linux', 'win32', 'darwin'}): >>> assert count == 1, 'Exactly one of the above parts should have run' >>> else: >>> assert count == 0, 'Nothing should have run on plat={}'.format(plat) >>> # xdoctest: +REQUIRES(--verbose) >>> print('This is only printed if you run with --verbose') Example: >>> # New in 0.7.3: the requires directive can accept module names >>> # xdoctest: +REQUIRES(module:foobar) """ import sys import os import re import copy import warnings import operator from xdoctest import static_analysis as static from xdoctest import utils from collections import OrderedDict from collections import namedtuple def named(key, pattern): """ helper for regex Args: key (str): pattern (str): Returns: str """ return '(?P<{}>{})'.format(key, pattern) # TODO: modify global directive defaults via a config file DEFAULT_RUNTIME_STATE = { 'DONT_ACCEPT_BLANKLINE': False, 'ELLIPSIS': True, 'IGNORE_WHITESPACE': False, 'IGNORE_EXCEPTION_DETAIL': False, 'NORMALIZE_WHITESPACE': True, 'IGNORE_WANT': False, # 'IGNORE_MEASUREMENTS': False, # TODO: I want this flag to turn on normalization of numbers, # I.E: non-determenistic measurements do not cause doctest failure, but # other formatting errors will. 'NORMALIZE_REPR': True, 'REPORT_CDIFF': False, 'REPORT_NDIFF': False, 'REPORT_UDIFF': True, # If True, all doctests in this context are run in the same event loop. # Otherwise, async doctest blocks are run in independent event loops # and only if a top-level await exists. New in 1.3.0 'ASYNC': False, # Doctests will be skipped while this is True, note that test only run # if this is False and REQUIRES is empty. 'SKIP': False, # Maintains a set unmet dependencies, ie the reasons we are skipping. # Doctests will be skipped while REQUIRES is non-empty and SKIP is False. 'REQUIRES': set(), # Original directives we are currently not supporting: # DONT_ACCEPT_TRUE_FOR_1 # REPORT_ONLY_FIRST_FAILURE # REPORTING_FLAGS # COMPARISON_FLAGS } Effect = namedtuple('Effect', ('action', 'key', 'value')) class RuntimeState(utils.NiceRepr): """ Maintains the runtime state for a single ``run()`` of an example Inline directives are pushed and popped after the line is run. Otherwise directives persist until another directive disables it. CommandLine: xdoctest -m xdoctest.directive RuntimeState Example: >>> from xdoctest.directive import * >>> runstate = RuntimeState() >>> assert not runstate['IGNORE_WHITESPACE'] >>> # Directives modify the runtime state >>> directives = list(Directive.extract('# xdoc: -ELLIPSIS, +IGNORE_WHITESPACE')) >>> runstate.update(directives) >>> assert not runstate['ELLIPSIS'] >>> assert runstate['IGNORE_WHITESPACE'] >>> # Inline directives only persist until the next update >>> directives = [Directive('IGNORE_WHITESPACE', False, inline=True)] >>> runstate.update(directives) >>> assert not runstate['IGNORE_WHITESPACE'] >>> runstate.update({}) >>> assert runstate['IGNORE_WHITESPACE'] Example: >>> # xdoc: +IGNORE_WHITESPACE >>> print(str(RuntimeState())) """ def __init__(self, default_state=None): """ Args: default_state (None | dict): starting default state, if unspecified falls back to the global DEFAULT_RUNTIME_STATE """ self._global_state = copy.deepcopy(DEFAULT_RUNTIME_STATE) if default_state: self._global_state.update(default_state) self._inline_state = {} def to_dict(self): """ Returns: OrderedDict """ state = self._global_state.copy() state.update(self._inline_state) state = OrderedDict(sorted(state.items())) return state def __nice__(self): """ Returns: str """ parts = ['{}: {}'.format(*item) for item in self.to_dict().items()] return ('{' + ', '.join(parts) + '}') def __getitem__(self, key): """ Args: key (str): Returns: Any """ if key not in self._global_state: raise KeyError('Unknown key: {}'.format(key)) if key in self._inline_state: return self._inline_state[key] else: return self._global_state[key] def __setitem__(self, key, value): """ Args: key (str): value (Any): """ if key not in self._global_state: raise KeyError('Unknown key: {}'.format(key)) self._global_state[key] = value def set_report_style(self, reportchoice, state=None): """ Args: reportchoice (str): name of report style state (None | Dict): if unspecified defaults to the global state Example: >>> from xdoctest.directive import * >>> runstate = RuntimeState() >>> assert runstate['REPORT_UDIFF'] >>> runstate.set_report_style('ndiff') >>> assert not runstate['REPORT_UDIFF'] >>> assert runstate['REPORT_NDIFF'] """ # When enabling a report flag, toggle all others off if state is None: state = self._global_state for k in state.keys(): if k.startswith('REPORT_'): state[k] = False state['REPORT_' + reportchoice.upper()] = True def update(self, directives): """ Update the runtime state given a set of directives Args: directives (List[Directive]): list of directives. The ``effects`` method is used to update this object. """ # Clear the previous inline state self._inline_state.clear() for directive in directives: for effect in directive.effects(): action, key, value = effect if action == 'noop': continue if key not in self._global_state: warnings.warn('Unknown state: {}'.format(key)) # Determine if this impacts the local (inline) or global state. if directive.inline: state = self._inline_state else: state = self._global_state if action == 'set_report_style': # Special handling of report style self.set_report_style(key.replace('REPORT_', '')) elif action == 'assign': state[key] = value elif action == 'set.add': state[key].add(value) elif action == 'set.remove': try: state[key].remove(value) except KeyError: pass else: raise KeyError('unknown action {}'.format(action)) class Directive(utils.NiceRepr): """ Directives modify the runtime state. """ def __init__(self, name, positive=True, args=[], inline=None): """ Args: name (str): The name of the directive positive (bool): if it is enabling / disabling args (List[str]): arguments given to the directive inline (bool | None): True if this is an inline directive (i.e. only impacts a single line) """ self.name = name self.args = args self.inline = inline self.positive = positive @classmethod def extract(cls, text): """ Parses directives from a line or repl line Args: text (str): must correspond to exactly one PS1 line and its PS2 followups. Yields: Directive: directive - the parsed directives Note: The original ``doctest`` module sometimes yielded false positives for a directive pattern. Because ``xdoctest`` is parsing the text, this issue does not occur. Example: >>> from xdoctest.directive import Directive, RuntimeState >>> state = RuntimeState() >>> assert len(state['REQUIRES']) == 0 >>> extracted1 = list(Directive.extract('# xdoctest: +REQUIRES(CPYTHON)')) >>> extracted2 = list(Directive.extract('# xdoctest: +REQUIRES(PYPY)')) >>> print('extracted1 = {!r}'.format(extracted1)) >>> print('extracted2 = {!r}'.format(extracted2)) >>> effect1 = extracted1[0].effects()[0] >>> effect2 = extracted2[0].effects()[0] >>> print('effect1 = {!r}'.format(effect1)) >>> print('effect2 = {!r}'.format(effect2)) >>> assert effect1.value == 'CPYTHON' >>> assert effect2.value == 'PYPY' >>> # At least one of these will not be satisfied >>> assert effect1.action == 'set.add' or effect2.action == 'set.add' >>> state.update(extracted1) >>> state.update(extracted2) >>> print('state = {!r}'.format(state)) >>> assert len(state['REQUIRES']) > 0 Example: >>> from xdoctest.directive import Directive >>> text = '# xdoc: + SKIP' >>> print(', '.join(list(map(str, Directive.extract(text))))) >>> # Directive with args >>> text = '# xdoctest: requires(--show)' >>> print(', '.join(list(map(str, Directive.extract(text))))) >>> # Malformatted directives are ignored >>> # xdoctest: +REQUIRES(module:pytest) >>> text = '# xdoctest: does_not_exist, skip' >>> import pytest >>> with pytest.warns(Warning) as record: >>> print(', '.join(list(map(str, Directive.extract(text))))) >>> # Two directives in one line >>> text = '# xdoctest: +ELLIPSIS, -NORMALIZE_WHITESPACE' >>> print(', '.join(list(map(str, Directive.extract(text))))) , >>> # Make sure commas inside parens are not split >>> text = '# xdoctest: +REQUIRES(module:foo,module:bar)' >>> print(', '.join(list(map(str, Directive.extract(text))))) Example: >>> from xdoctest.directive import Directive, RuntimeState >>> any(Directive.extract(' # xdoctest: skip')) True >>> any(Directive.extract(' # badprefix: not-a-directive')) False >>> any(Directive.extract(' # xdoctest: skip')) True >>> any(Directive.extract(' # badprefix: not-a-directive')) False """ # Flag extracted directives as inline iff the text contains non-comments inline = not all(line.strip().startswith('#') for line in text.splitlines()) # for comment in static.extract_comments(text): # remove the first comment character and see if the comment matches # the directive pattern m = DIRECTIVE_RE.match(comment[1:].strip()) if m: for key, optstr in m.groupdict().items(): if optstr: optparts = _split_opstr(optstr) # optparts = optstr.split(',') for optpart in optparts: directive = parse_directive_optstr(optpart, inline) if directive: yield directive def __nice__(self): """ Returns: str """ prefix = ['-', '+'][int(self.positive)] if self.args: argstr = ', '.join(self.args) return '{}{}({})'.format(prefix, self.name, argstr) else: return '{}{}'.format(prefix, self.name) def _unpack_args(self, num): from xdoctest.utils import util_deprecation util_deprecation.schedule_deprecation( modname='xdoctest', name='Directive._unpack_args', type='method', migration='there is no need to use this', deprecate='1.0.0', error='1.1.0', remove='1.2.0' ) nargs = self.args if len(nargs) != 1: raise TypeError( '{} directive expected exactly {} argument(s), ' 'got {}'.format(self.name, num, nargs)) return self.args def effect(self, argv=None, environ=None): from xdoctest.utils import util_deprecation util_deprecation.schedule_deprecation( modname='xdoctest', name='Directive.effect', type='method', migration='Use Directive.effects instead', deprecate='1.0.0', error='1.1.0', remove='1.2.0' ) effects = self.effects(argv=argv, environ=environ) if len(effects) > 1: raise Exception('Old method cannot handle multiple effects') return effects[0] def effects(self, argv=None, environ=None): """ Returns how this directive modifies a RuntimeState object This is called by :func:`RuntimeState.update` to update itself Args: argv (List[str] | None): Command line the directive is interpreted in the context of. If unspecified, uses ``sys.argv``. environ (Dict[str, str] | None): Environment variables the directive is interpreted in the context of. If unspecified, uses ``os.environ``. Returns: List[Effect]: list of named tuples containing: action (str): code indicating how to update key (str): name of runtime state item to modify value (object): value to modify with CommandLine: xdoctest -m xdoctest.directive Directive.effects Example: >>> Directive('SKIP').effects()[0] Effect(action='assign', key='SKIP', value=True) >>> Directive('SKIP', inline=True).effects()[0] Effect(action='assign', key='SKIP', value=True) >>> Directive('REQUIRES', args=['-s']).effects(argv=['-s'])[0] Effect(action='noop', key='REQUIRES', value='-s') >>> Directive('REQUIRES', args=['-s']).effects(argv=[])[0] Effect(action='set.add', key='REQUIRES', value='-s') >>> Directive('ELLIPSIS', args=['-s']).effects(argv=[])[0] Effect(action='assign', key='ELLIPSIS', value=True) Doctest: >>> # requirement directive with module >>> directive = list(Directive.extract('# xdoctest: requires(module:xdoctest)'))[0] >>> print('directive = {}'.format(directive)) >>> print('directive.effects() = {}'.format(directive.effects()[0])) directive = directive.effects() = Effect(action='noop', key='REQUIRES', value='module:xdoctest') >>> directive = list(Directive.extract('# xdoctest: requires(module:notamodule)'))[0] >>> print('directive = {}'.format(directive)) >>> print('directive.effects() = {}'.format(directive.effects()[0])) directive = directive.effects() = Effect(action='set.add', key='REQUIRES', value='module:notamodule') >>> directive = list(Directive.extract('# xdoctest: requires(env:FOO==1)'))[0] >>> print('directive = {}'.format(directive)) >>> print('directive.effects() = {}'.format(directive.effects(environ={})[0])) directive = directive.effects() = Effect(action='set.add', key='REQUIRES', value='env:FOO==1') >>> directive = list(Directive.extract('# xdoctest: requires(env:FOO==1)'))[0] >>> print('directive = {}'.format(directive)) >>> print('directive.effects() = {}'.format(directive.effects(environ={'FOO': '1'})[0])) directive = directive.effects() = Effect(action='noop', key='REQUIRES', value='env:FOO==1') >>> # requirement directive with two args >>> directive = list(Directive.extract('# xdoctest: requires(--show, module:xdoctest)'))[0] >>> print('directive = {}'.format(directive)) >>> for effect in directive.effects(): >>> print('effect = {!r}'.format(effect)) directive = effect = Effect(action='set.add', key='REQUIRES', value='--show') effect = Effect(action='noop', key='REQUIRES', value='module:xdoctest') """ key = self.name value = None effects = [] if self.name == 'REQUIRES': # Special handling of REQUIRES for arg in self.args: value = arg if _is_requires_satisfied(arg, argv=argv, environ=environ): # If the requirement is met, then do nothing, action = 'noop' else: # otherwise, add or remove the condition from REQUIREMENTS, # depending on if the directive is positive or negative. if self.positive: action = 'set.add' else: action = 'set.remove' effects.append(Effect(action, key, value)) elif key.startswith('REPORT_'): # Special handling of report style if self.positive: action = 'noop' else: action = 'set_report_style' effects.append(Effect(action, key, value)) else: # The action overwrites state[key] using value action = 'assign' value = self.positive effects.append(Effect(action, key, value)) return effects def _split_opstr(optstr): """ Simplified balanced paren logic to only split commas outside of parens Args: opstr (str): the command, which may contain more than one directive Returns: List[str]: individual directive optstrings Example: >>> optstr = '+FOO, REQUIRES(foo,bar), +ELLIPSIS' >>> _split_opstr(optstr) ['+FOO', 'REQUIRES(foo,bar)', '+ELLIPSIS'] """ import re stack = [] split_pos = [] for match in re.finditer(r',|\(|\)', optstr): token = match.group() if token == ',' and not stack: # Only split when there are no parens split_pos.append(match.start()) elif token == '(': stack.append(token) elif token == ')': stack.pop() assert len(stack) == 0, 'parens not balanced' parts = [] prev = 0 for curr in split_pos: parts.append(optstr[prev:curr].strip()) prev = curr + 1 curr = None parts.append(optstr[prev:curr].strip()) return parts def _is_requires_satisfied(arg, argv=None, environ=None): """ Determines if the argument to a REQUIRES directive is satisfied Args: arg (str): condition code argv (List[str] | None): cmdline if arg is cmd code usually ``sys.argv`` environ (Dict[str, str] | None): environment variables usually ``os.environ`` Returns: bool: flag - True if the requirement is met Example: >>> from xdoctest.directive import * # NOQA >>> from xdoctest.directive import _is_requires_satisfied >>> _is_requires_satisfied('PY2', argv=[]) >>> _is_requires_satisfied('PY3', argv=[]) >>> _is_requires_satisfied('cpython', argv=[]) >>> _is_requires_satisfied('pypy', argv=[]) >>> _is_requires_satisfied('nt', argv=[]) >>> _is_requires_satisfied('linux', argv=[]) >>> _is_requires_satisfied('env:FOO', argv=[], environ={'FOO': '1'}) True >>> _is_requires_satisfied('env:FOO==1', argv=[], environ={'FOO': '1'}) True >>> _is_requires_satisfied('env:FOO==T', argv=[], environ={'FOO': '1'}) False >>> _is_requires_satisfied('env:BAR', argv=[], environ={'FOO': '1'}) False >>> _is_requires_satisfied('env:BAR==1', argv=[], environ={'FOO': '1'}) False >>> _is_requires_satisfied('env:BAR!=1', argv=[], environ={'FOO': '1'}) True >>> _is_requires_satisfied('env:BAR!=1', argv=[], environ={'BAR': '0'}) True >>> _is_requires_satisfied('env:BAR!=1') ... >>> # xdoctest: +REQUIRES(module:pytest) >>> import pytest >>> with pytest.raises(ValueError): >>> _is_requires_satisfied('badflag:BAR==1', []) >>> import pytest >>> with pytest.raises(KeyError): >>> _is_requires_satisfied('env:BAR>=1', argv=[], environ={'BAR': '0'}) """ # TODO: add python version options # https://docs.python.org/3/library/sys.html#sys.platform SYS_PLATFORM_TAGS = ['win32', 'linux', 'darwin', 'cywgin', 'aix', 'freebsd'] OS_NAME_TAGS = ['posix', 'nt', 'java'] PY_IMPL_TAGS = ['cpython', 'ironpython', 'jython', 'pypy'] # TODO: tox tags: https://tox.readthedocs.io/en/latest/example/basic.html PY_VER_TAGS = ['py2', 'py3'] arg_lower = arg.lower() # TODO: better chained version parsing # Should be able to specify Python>=3.8,Python!=3.8.1,env:HASIT<3 if arg.startswith('-'): if argv is None: argv = sys.argv flag = arg in argv elif arg.startswith('module:'): parts = arg.split(':') if len(parts) != 2: raise ValueError('xdoctest module REQUIRES directive has too many parts') # set flag to False (aka SKIP) if the module does not exist modname = parts[1] flag = _module_exists(modname) elif arg.startswith('env:'): if environ is None: environ = os.environ parts = arg.split(':') if len(parts) != 2: raise ValueError('xdoctest env REQUIRES directive has too many parts') envexpr = parts[1] expr_parts = re.split('(==|!=|>=)', envexpr) if len(expr_parts) == 1: # Test if the environment variable is truthy env_key = expr_parts[0] flag = bool(environ.get(env_key, None)) elif len(expr_parts) == 3: # Test if the environment variable is equal to an expression env_key, op_code, value = expr_parts env_val = environ.get(env_key, None) if op_code == '==': op = operator.eq elif op_code == '!=': op = operator.ne else: raise KeyError(op_code) flag = op(env_val, value) else: raise ValueError('Too many expr_parts={}'.format(expr_parts)) elif arg_lower in SYS_PLATFORM_TAGS: flag = sys.platform.lower().startswith(arg_lower) elif arg_lower in OS_NAME_TAGS: flag = os.name.startswith(arg_lower) elif arg_lower in PY_IMPL_TAGS: import platform flag = platform.python_implementation().lower().startswith(arg_lower) elif arg_lower in PY_VER_TAGS: if sys.version_info[0] == 2: # nocover flag = arg_lower == 'py2' elif sys.version_info[0] == 3: # pragma: nobranch flag = arg_lower == 'py3' else: # nocover flag = False else: msg = utils.codeblock( ''' Argument to REQUIRES directive must be either (1) a PLATFORM or OS tag (e.g. win32, darwin, linux), (2) a command line flag prefixed with '--', or (3) a module prefixed with 'module:'. (4) an environment variable prefixed with 'env:'. Got arg={!r} ''').replace('\n', ' ').strip().format(arg) raise ValueError(msg) return flag _MODNAME_EXISTS_CACHE = {} def _module_exists(modname): """ Args: modname (str): Returns: bool """ if modname not in _MODNAME_EXISTS_CACHE: from xdoctest import static_analysis as static modpath = static.modname_to_modpath(modname) exists_flag = modpath is not None _MODNAME_EXISTS_CACHE[modname] = exists_flag exists_flag = _MODNAME_EXISTS_CACHE[modname] return exists_flag # __docstubs__ = ''' # import re # if hasattr(re, 'Pattern'): # RE_Pattern = re.Pattern # else: # # sys.version_info[0:2] <= 3.6 # RE_Pattern = type(re.compile('.*')) # DIRECTIVE_RE: RE_Pattern # DIRECTIVE_PATTERNS: list # COMMANDS: list # ''' COMMANDS = list(DEFAULT_RUNTIME_STATE.keys()) + [ # Define extra commands that can resolve to a runtime state modification 'REQUIRES', ] DIRECTIVE_PATTERNS = [ #r'\s*\+\s*' + named('style1', '.*'), r'x?doctest:\s*' + named('style2', '.*'), r'x?doc:\s*' + named('style3', '.*'), ] DIRECTIVE_RE = re.compile('|'.join(DIRECTIVE_PATTERNS), flags=re.IGNORECASE) def parse_directive_optstr(optpart, inline=None): """ Parses the information in the directive from the "optpart" optstrs are: optionally prefixed with ``+`` (default) or ``-`` comma separated may contain one paren enclosed argument (experimental) all spaces are ignored Args: optpart (str): the string corresponding to the operation inline (None | bool): True if the directive only applies to a single line. Returns: Directive: the parsed directive Example: >>> print(str(parse_directive_optstr('+IGNORE_WHITESPACE'))) """ optpart = optpart.strip() # all spaces are ignored optpart = optpart.replace(' ', '') paren_pos = optpart.find('(') if paren_pos > -1: # handle simple paren case. body = optpart[paren_pos + 1:optpart.find(')')] args = [a.strip() for a in body.split(',')] # args = [optpart[paren_pos + 1:optpart.find(')')]] optpart = optpart[:paren_pos] else: args = [] # Determine if the option starts with + or - (we assume + by default) if optpart.startswith(('+', '-')): positive = not optpart.startswith('-') name = optpart[1:] else: positive = True name = optpart name = name.upper() if name not in COMMANDS: msg = 'Unknown directive: {!r}'.format(optpart) warnings.warn(msg) else: directive = Directive(name, positive, args, inline) return directive if __name__ == '__main__': """ CommandLine: python -m xdoctest.directive all """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/src/xdoctest/directive.pyi000066400000000000000000000035761505122333300226670ustar00rootroot00000000000000from typing import Dict from collections import OrderedDict from typing import Any from typing import List from _typeshed import Incomplete from collections.abc import Generator from typing import Any, NamedTuple from xdoctest import utils def named(key: str, pattern: str) -> str: ... DEFAULT_RUNTIME_STATE: Incomplete class Effect(NamedTuple): action: Incomplete key: Incomplete value: Incomplete class RuntimeState(utils.NiceRepr): def __init__(self, default_state: None | dict = None) -> None: ... def to_dict(self) -> OrderedDict: ... def __nice__(self) -> str: ... def __getitem__(self, key: str) -> Any: ... def __setitem__(self, key: str, value: Any) -> None: ... def set_report_style(self, reportchoice: str, state: None | Dict = None) -> None: ... def update(self, directives: List[Directive]) -> None: ... class Directive(utils.NiceRepr): name: str args: List[str] inline: bool | None positive: bool def __init__(self, name: str, positive: bool = True, args: List[str] = ..., inline: bool | None = None) -> None: ... @classmethod def extract(cls, text: str) -> Generator[Directive, None, None]: ... def __nice__(self) -> str: ... def effect(self, argv: Incomplete | None = ..., environ: Incomplete | None = ...): ... def effects(self, argv: List[str] | None = None, environ: Dict[str, str] | None = None) -> List[Effect]: ... COMMANDS: Incomplete DIRECTIVE_PATTERNS: Incomplete DIRECTIVE_RE: Incomplete def parse_directive_optstr(optpart: str, inline: None | bool = None) -> Directive: ... Erotemic-xdoctest-fac8308/src/xdoctest/docstr/000077500000000000000000000000001505122333300214515ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/src/xdoctest/docstr/__init__.py000066400000000000000000000010631505122333300235620ustar00rootroot00000000000000from xdoctest.docstr import docscrape_google from xdoctest.docstr.docscrape_google import (parse_google_argblock, parse_google_args, parse_google_retblock, parse_google_returns, split_google_docblocks,) __all__ = ['docscrape_google', 'parse_google_argblock', 'parse_google_args', 'parse_google_retblock', 'parse_google_returns', 'split_google_docblocks'] Erotemic-xdoctest-fac8308/src/xdoctest/docstr/docscrape_google.py000066400000000000000000000474501505122333300253340ustar00rootroot00000000000000""" Handles parsing of information out of google style docstrings It is not clear which of these `GoogleStyleDocs1`_ `GoogleStyleDocs2`_ is *the* standard or if there is one. This code has been exported to a standalone package * https://github.com/Erotemic/googledoc This is similar to: * https://pypi.org/project/docstring-parser/ * https://pypi.org/project/numpydoc/ It hasn't been decided if this will remain vendored in xdoctest or pulled in as a dependency. References: .. [GoogleStyleDocs1] https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html#example-google .. [GoogleStyleDocs2] http://www.sphinx-doc.org/en/stable/ext/example_google.html#example-google """ import re import textwrap import collections from xdoctest import exceptions from xdoctest.utils.util_str import ensure_unicode DocBlock = collections.namedtuple('DocBlock', ['text', 'offset']) def split_google_docblocks(docstr): """ Breaks a docstring into parts defined by google style Args: docstr (str): a docstring Returns: List[Tuple[str, DocBlock]]: list of 2-tuples where the first item is a google style docstring tag and the second item is the bock corresponding to that tag. The block itself is a 2-tuple where the first item is the unindented text and the second item is the line offset indicating that blocks location in the docstring. Note: Unknown or "freeform" sections are given a generic "__DOC__" tag. A section tag may be specified multiple times. CommandLine: xdoctest xdoctest.docstr.docscrape_google split_google_docblocks:2 Example: >>> from xdoctest.docstr.docscrape_google import * # NOQA >>> from xdoctest import utils >>> docstr = utils.codeblock( ... ''' ... one line description ... ... multiline ... description ... ... Args: ... foo: bar ... ... Returns: ... None ... ... Example: ... >>> print('eg1') ... eg1 ... ... Example: ... >>> print('eg2') ... eg2 ... ''') >>> groups = split_google_docblocks(docstr) >>> assert len(groups) == 5 >>> [g[0] for g in groups] ['__DOC__', 'Args', 'Returns', 'Example', 'Example'] Example: >>> from xdoctest.docstr.docscrape_google import * # NOQA >>> docstr = split_google_docblocks.__doc__ >>> groups = split_google_docblocks(docstr) Example: >>> from xdoctest.docstr.docscrape_google import * # NOQA >>> from xdoctest import utils >>> docstr = utils.codeblock( ... ''' ... a description with a leading space ... ... Example: ... >>> foobar ... ''') >>> groups = split_google_docblocks(docstr) >>> print('groups = {!r}'.format(groups)) Example: >>> from xdoctest.docstr.docscrape_google import * # NOQA >>> from xdoctest import utils >>> docstr = utils.codeblock( ... ''' ... Example: ... >>> foobar ... ''') >>> # Check that line offsets are valid if the first line is not blank >>> groups = split_google_docblocks(docstr) >>> offset = groups[0][1][1] >>> print('offset = {!r}'.format(offset)) >>> assert offset == 0 >>> # Check that line offsets are valid if the first line is blank >>> groups = split_google_docblocks(chr(10) + docstr) >>> offset = groups[0][1][1] >>> print('offset = {!r}'.format(offset)) >>> assert offset == 1 """ if not isinstance(docstr, str): raise TypeError('Input docstr must be a string. Got {} instead'.format( type(docstr))) def get_indentation(line_): """ returns number of preceding spaces """ return len(line_) - len(line_.lstrip()) # Parse out initial documentation lines # Then parse out the blocked lines. docstr = ensure_unicode(docstr) docstr = textwrap.dedent(docstr) docstr_lines = docstr.split('\n') line_indent = [get_indentation(line) for line in docstr_lines] line_len = [len(line) for line in docstr_lines] # The first line may not have the correct indentation if it starts # right after the triple quotes. Adjust it in this case to ensure that # base indent is always 0 adjusted = False is_nonzero = [len_ > 0 for len_ in line_len] if len(line_indent) >= 2: if line_len[0] != 0: indents = [x for x, f in zip(line_indent, is_nonzero) if f] if len(indents) >= 2: indent_adjust = min(indents[1:]) line_indent[0] += indent_adjust line_len[0] += indent_adjust docstr_lines[0] = (' ' * indent_adjust) + docstr_lines[0] adjusted = True if adjusted: # Redo prepreocessing, but this time on a rectified input docstr = textwrap.dedent('\n'.join(docstr_lines)) docstr_lines = docstr.split('\n') line_indent = [get_indentation(line) for line in docstr_lines] line_len = [len(line) for line in docstr_lines] indents = [x for x, f in zip(line_indent, is_nonzero) if f] if False and len(indents) >= 1: if indents[0] != 0: # debug info print('INDENTATION ERROR IN PARSING DOCSTRING') print('CHECK TO MAKE SURE YOU USED A RAW STRING IF YOU USE "\\n"') # TODO: Report this error with line number and file information print('Docstring:') print('----------') print(docstr) print('----------') raise exceptions.MalformedDocstr('malformed google docstr') base_indent = 0 # We will group lines by their indentation. # Rectify empty lines by giving them their parent's indentation. true_indent = [] prev_indent = None for indent_, len_ in zip(line_indent, line_len): if len_ == 0: # Empty lines take on their parents indentation indent_ = prev_indent true_indent.append(indent_) prev_indent = indent_ # List of google style tags grouped by alias tag_groups = [ ['Args', 'Arguments', 'Parameters', 'Other Parameters'], ['Kwargs', 'Keyword Args', 'Keyword Arguments'], ['Warns', 'Warning', 'Warnings'], ['Returns', 'Return'], ['Example', 'Examples'], ['Doctest'], ['Note', 'Notes'], ['Yields', 'Yield'], ['Attributes'], ['Methods'], ['Raises'], ['References'], ['See Also'], ['Todo'], ] # Map aliased tags to a canonical name (the first item in the group). tag_aliases = dict([(item, group[0]) for group in tag_groups for item in group]) # Allow for single or double colon (support for pytorch) tag_pattern = '^' + '(' + '|'.join(tag_aliases.keys()) + ') *::? *$' # Label lines by their group-id group_id = 0 prev_indent = 0 group_list = [] in_tag = False for line_num, (line, indent_) in enumerate(zip(docstr_lines, true_indent)): if re.match(tag_pattern, line): # Check if we can look ahead if line_num + 1 < len(docstr_lines): # A tag is only valid if its next line is properly indented, # empty, or is a tag itself. indent_increase = true_indent[line_num + 1] > base_indent indent_zero = line_len[line_num + 1] == 0 matches_tag = re.match(tag_pattern, docstr_lines[line_num + 1]) if (indent_increase or indent_zero or matches_tag): group_id += 1 in_tag = True else: group_id += 1 in_tag = True # If the indentation goes back to the base, then we have left the tag elif in_tag and indent_ != prev_indent and indent_ == base_indent: group_id += 1 in_tag = False group_list.append(group_id) prev_indent = indent_ assert len(docstr_lines) == len(group_list) # Group docstr lines by group list groups_ = collections.defaultdict(list) for groupid, line in zip(group_list, docstr_lines): groups_[groupid].append(line) groups = [] line_offset = 0 for k, lines in groups_.items(): if len(lines) == 0 or (len(lines) == 1 and len(lines[0]) == 0): line_offset += len(lines) continue elif len(lines) >= 1 and re.match(tag_pattern, lines[0]): # An encoded google sub-block key = lines[0].strip().rstrip(':') val = lines[1:] subblock = textwrap.dedent('\n'.join(val)) else: # A top level text documentation block key = '__DOC__' val = lines[:] subblock = '\n'.join(val) key = tag_aliases.get(key, key) block = DocBlock(subblock, line_offset) groups.append((key, block)) line_offset += len(lines) return groups def parse_google_args(docstr): r""" Generates dictionaries of argument hints based on a google docstring Args: docstr (str): a google-style docstring Yields: Dict[str, str]: dictionaries of parameter hints Example: >>> docstr = parse_google_args.__doc__ >>> argdict_list = list(parse_google_args(docstr)) >>> print([sorted(d.items()) for d in argdict_list]) [[('desc', 'a google-style docstring'), ('name', 'docstr'), ('type', 'str')]] """ blocks = split_google_docblocks(docstr) for key, block in blocks: lines = block[0] if key == 'Args': for argdict in parse_google_argblock(lines): yield argdict def parse_google_returns(docstr, return_annot=None): r""" Generates dictionaries of possible return hints based on a google docstring Args: docstr (str): a google-style docstring return_annot (str | None): the return type annotation (if one exists) Yields: Dict[str, str]: dictionaries of return value hints Example: >>> docstr = parse_google_returns.__doc__ >>> retdict_list = list(parse_google_returns(docstr)) >>> print([sorted(d.items()) for d in retdict_list]) [[('desc', 'dictionaries of return value hints'), ('type', 'Dict[str, str]')]] Example: >>> docstr = split_google_docblocks.__doc__ >>> retdict_list = list(parse_google_returns(docstr)) >>> print([sorted(d.items())[1] for d in retdict_list]) [('type', 'List[Tuple[str, DocBlock]]')] """ blocks = split_google_docblocks(docstr) for key, block in blocks: lines = block[0] if key == 'Returns': for retdict in parse_google_retblock(lines, return_annot): yield retdict if key == 'Yields': for retdict in parse_google_retblock(lines, return_annot): yield retdict def parse_google_retblock(lines, return_annot=None): r""" Parse information out of a returns or yields block. A returns or yeids block should be formatted as one or more ``'{type}:{description}'`` strings. The description can occupy multiple lines, but the indentation should increase. Args: lines (str): unindented lines from a Returns or Yields section return_annot (str | None): the return type annotation (if one exists) Yields: Dict[str, str]: each dict specifies the return type and its description Example: >>> # Test various ways that retlines can be written >>> assert len(list(parse_google_retblock('list: a desc'))) == 1 >>> # --- >>> hints = list(parse_google_retblock('\n'.join([ ... 'entire line can be desc', ... ' ', ... ' if a return type annotation is given', ... ]), return_annot='int')) >>> assert len(hints) == 1 >>> # --- >>> hints = list(parse_google_retblock('\n'.join([ ... 'bool: a description', ... ' with a newline', ... ]))) >>> assert len(hints) == 1 >>> # --- >>> hints = list(parse_google_retblock('\n'.join([ ... 'int or bool: a description', ... ' ', ... ' with a separated newline', ... ' ', ... ]))) >>> assert len(hints) == 1 >>> # --- >>> hints = list(parse_google_retblock('\n'.join([ ... # Multiple types can be specified ... 'threading.Thread: a description', ... '(int, str): a tuple of int and str', ... 'tuple: a tuple of int and str', ... 'Tuple[int, str]: a tuple of int and str', ... ]))) >>> assert len(hints) == 4 >>> # --- >>> # If the colon is not specified nothing will be parsed >>> # according to the "official" spec, but lets try and do it anyway >>> hints = list(parse_google_retblock('\n'.join([ ... 'list', ... 'Tuple[int, str]', ... ]))) >>> assert len(hints) == 2 >>> assert len(list(parse_google_retblock('no type, just desc'))) == 1 ... """ if return_annot is not None: # If the function has a return type annotation then the return block # should only be interpreted as a description. The formatting of the # lines is not modified in this case. retdict = {'type': return_annot, 'desc': lines} yield retdict else: # Otherwise, this examines each line without any extra indentation (wrt # the returns block) splits each line using a colon, and interprets # anything to the left of the colon as the type hint. The rest of the # parts are the description. Extra whitespace is removed from the # descriptions. def finalize(retdict): final_desc = ' '.join([p for p in retdict['desc'] if p]) retdict['desc'] = final_desc return retdict retdict = None noindent_pat = re.compile(r'^[^\s]') for line in lines.split('\n'): # Lines without indentation should declare new type hints if noindent_pat.match(line): if retdict is not None: # Finalize and return any previously constructed type hint yield finalize(retdict) retdict = None # FIXME: # This doesn't quite work if ":" is part of the type # definition. Not sure if it can be. Needs better parsing # to ensure the ":" is actually the separator between # type and desc if ':' in line: parts = line.split(':') retdict = { 'type': parts[0].strip(), 'desc': [':'.join(parts[1:]).strip()], } else: # warning (malformatted google docstring) We should support # the case where they just specify the type and no # description. USE_TYPE_HACK = 1 if USE_TYPE_HACK: import ast try: ast.parse(line.strip()) except Exception: # Not parseable, assume this is a description. retdict = { 'type': None, 'desc': [line.strip()], } else: # Parseable, assume this is a type retdict = { 'type': line.strip(), 'desc': [], } else: # Lines with indentation should extend previous descriptions. if retdict is not None: retdict['desc'].append(line.strip()) if retdict is not None: yield finalize(retdict) def parse_google_argblock(lines, clean_desc=True): r""" Parse out individual items from google-style args blocks. Args: lines (str): the unindented lines from an Args docstring section clean_desc (bool): if True, will strip the description of newlines and indents. Defaults to True. Yields: Dict[str, str | None]: A dictionary containing keys, "name", "type", and "desc" corresponding to an argument in the Args block. Example: >>> # Test various ways that arglines can be written >>> line_list = [ ... '', ... 'foo1 (int): a description', ... 'foo2: a description\n with a newline', ... 'foo3 (int or str): a description', ... 'foo4 (int or threading.Thread): a description', ... # ... # this is sphynx-like typing style ... 'param1 (:obj:`str`, optional): ', ... 'param2 (:obj:`list` of :obj:`str`):', ... # ... # the Type[type] syntax is defined by the python typeing module ... 'attr1 (Optional[int]): Description of `attr1`.', ... 'attr2 (List[str]): Description of `attr2`.', ... 'attr3 (Dict[str, str]): Description of `attr3`.', ... '*args : variable positional args description', ... '**kwargs : keyword arguments description', ... 'malformed and unparseable', ... 'param_no_desc1', # todo: this should be parseable ... 'param_no_desc2:', ... 'param_no_desc3 ()', # todo: this should be parseable ... 'param_no_desc4 ():', ... 'param_no_desc5 (str)', # todo: this should be parseable ... 'param_no_desc6 (str):', ... ] >>> lines = '\n'.join(line_list) >>> argdict_list = list(parse_google_argblock(lines)) >>> # All lines except the first should be accepted >>> assert len(argdict_list) == len(line_list) - 5 >>> assert argdict_list[1]['desc'] == 'a description with a newline' """ def named(key, pattern): return '(?P<{}>{})'.format(key, pattern) def optional(pattern): return '({})?'.format(pattern) def positive_lookahead(pattern): return '(?={})'.format(pattern) def regex_or(patterns): return '({})'.format('|'.join(patterns)) whitespace = r'\s*' endofstr = r'\Z' # Define characters that can be part of variable / type names # Note: a variable name might be prefixed with 0, 1, or 2, `*` to indicate # *args or **kwargs varname = named('name', r'\*?\*?[A-Za-z_][A-Za-z0-9_]*') typename = named('type', '[^)]*?') argdesc = named('desc', '.*?') # Types are optional, and must be enclosed in parens optional_type = optional(whitespace.join([r'\(', typename, r'\)'])) # Each arg hint must defined a on newline without any indentation argdef = whitespace.join([varname, optional_type, ':']) # the description is everything after the colon until either the next line # without any indentation or the end of the string end_desc = regex_or(['^' + positive_lookahead(r'[^\s]'), endofstr]) flags = re.MULTILINE | re.DOTALL argline_pat = re.compile('^' + argdef + argdesc + end_desc, flags=flags) for match in argline_pat.finditer(lines): argdict = match.groupdict() # Clean description if clean_desc: desc_lines = [p.strip() for p in argdict['desc'].split('\n')] argdict['desc'] = ' '.join([p for p in desc_lines if p]) yield argdict Erotemic-xdoctest-fac8308/src/xdoctest/docstr/docscrape_google.pyi000066400000000000000000000015111505122333300254710ustar00rootroot00000000000000from typing import List from typing import Tuple from typing import Dict from _typeshed import Incomplete from collections.abc import Generator from typing import Any, NamedTuple class DocBlock(NamedTuple): text: Incomplete offset: Incomplete def split_google_docblocks(docstr: str) -> List[Tuple[str, DocBlock]]: ... def parse_google_args(docstr: str) -> Generator[Dict[str, str], None, None]: ... def parse_google_returns( docstr: str, return_annot: str | None = None ) -> Generator[Dict[str, str], None, None]: ... def parse_google_retblock( lines: str, return_annot: str | None = None ) -> Generator[Dict[str, str], None, Any]: ... def parse_google_argblock( lines: str, clean_desc: bool = True ) -> Generator[Dict[str, str | None], None, Any]: ... Erotemic-xdoctest-fac8308/src/xdoctest/docstr/docscrape_numpy.py000066400000000000000000000000701505122333300252130ustar00rootroot00000000000000# https://numpydoc.readthedocs.io/en/latest/format.html Erotemic-xdoctest-fac8308/src/xdoctest/docstr/docscrape_numpy.pyi000066400000000000000000000000011505122333300253560ustar00rootroot00000000000000 Erotemic-xdoctest-fac8308/src/xdoctest/doctest_example.py000066400000000000000000001623221505122333300237130ustar00rootroot00000000000000""" This module defines the main class that holds a DocTest example """ import __future__ import ast from collections import OrderedDict import traceback import warnings import math import sys import re import types from inspect import CO_COROUTINE from xdoctest import utils from xdoctest import directive from xdoctest import constants from xdoctest import static_analysis as static from xdoctest import parser from xdoctest import checker from xdoctest import exceptions from xdoctest import global_state __devnotes__ = """ TODO: - [ ] Rename DocTest to Doctest? - Probably not, its been years. - [ ] I dont like having "example" as a suffix to this modname, can we rename? - Probably not, its been years. """ __docstubs__ = """ from xdoctest.doctest_part import DoctestPart """ class DoctestConfig(dict): """ Doctest configuration Static configuration for collection, execution, and reporting doctests. Note dynamic directives are not managed by DoctestConfig, they use RuntimeState. """ def __init__(self, *args, **kwargs): super(DoctestConfig, self).__init__(*args, **kwargs) self.update({ # main options exposed by command line runner/plugin 'colored': hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(), 'reportchoice': 'udiff', 'default_runtime_state': {}, 'offset_linenos': False, 'global_exec': None, 'supress_import_errors': False, 'on_error': 'raise', 'partnos': False, 'verbose': 1, }) def _populate_from_cli(self, ns): from xdoctest.directive import parse_directive_optstr directive_optstr = ns['options'] default_runtime_state = {} if directive_optstr: for optpart in directive_optstr.split(','): directive = parse_directive_optstr(optpart) if directive is None: raise Exception( 'Failed to parse directive given in the xdoctest "options"' 'directive_optstr={!r}'.format(directive_optstr) ) default_runtime_state[directive.name] = directive.positive _examp_conf = { 'default_runtime_state': default_runtime_state, 'offset_linenos': ns['offset_linenos'], 'colored': ns['colored'], 'reportchoice': ns['reportchoice'], 'global_exec': ns['global_exec'], 'supress_import_errors': ns['supress_import_errors'], 'verbose': ns['verbose'], } return _examp_conf def _update_argparse_cli(self, add_argument, prefix=None, defaults={}): """ Updates a pytest or argparse CLI Args: add_argument (callable): the parser.add_argument function """ import argparse def str_lower(x): # python2 fix return str.lower(str(x)) add_argument_kws = [ (['--colored'], dict(dest='colored', default=self['colored'], help=('Enable or disable ANSI coloration in stdout'))), (['--nocolor'], dict(dest='colored', action='store_false', default=argparse.SUPPRESS, help=('Disable ANSI coloration in stdout'))), (['--offset'], dict(dest='offset_linenos', action='store_true', default=self['offset_linenos'], help=('If True formatted source linenumbers will agree with ' 'their location in the source file. Otherwise they ' 'will be relative to the doctest itself.'))), (['--report'], dict(dest='reportchoice', type=str_lower, choices=('none', 'cdiff', 'ndiff', 'udiff', 'only_first_failure',), default=self['reportchoice'], help=('Choose another output format for diffs on xdoctest failure'))), # used to build default_runtime_state (['--options'], dict(type=str_lower, default=None, dest='options', help='Default directive flags for doctests')), (['--global-exec'], dict(type=str, default=None, dest='global_exec', help='Custom Python code to execute before every test')), # FIXME: this has a spelling error (['--supress-import-errors'], dict(dest='supress_import_errors', action='store_true', default=self['supress_import_errors'], help='Removes tracebacks from errors in implicit imports')), (['--verbose'], dict( type=int, default=defaults.get('verbose', 3), dest='verbose', help=( 'Verbosity level. ' '0 is silent, ' '1 prints out test names, ' '2 additionally prints test stdout, ' '3 additionally prints test source'))), (['--quiet'], dict(action='store_true', dest='verbose', default=argparse.SUPPRESS, help='sets verbosity to 1')), (['--silent'], dict(action='store_false', dest='verbose', default=argparse.SUPPRESS, help='sets verbosity to 0')), ] if prefix is None: prefix = [''] # TODO: make environment variables as args more general import os environ_aware = {'report', 'options', 'global-exec', 'verbose'} for alias, kw in add_argument_kws: # Use environment variables for some defaults argname = alias[0].lstrip('-') if argname in environ_aware: env_argname = 'XDOCTEST_' + argname.replace('-', '_').upper() if 'default' in kw: kw['default'] = os.environ.get(env_argname, kw['default']) alias = [ a.replace('--', '--' + p + '-') if p else a for a in alias for p in prefix ] if prefix[0]: kw['dest'] = prefix[0] + '_' + kw['dest'] add_argument(*alias, **kw) def getvalue(self, key, given=None): """ Args: key (str): The configuration key given (Any): A user override Returns: Any: if given is None returns the configured value """ if given is None: return self[key] else: return given class DocTest: """ Holds information necessary to execute and verify a doctest Attributes: docsrc (str): doctest source code modpath (str | PathLike): module the source was read from callname (str): name of the function/method/class/module being tested num (int): the index of the doctest in the docstring. (i.e. this object refers to the num-th doctest within a docstring) lineno (int): The line (starting from 1) in the file that the doctest begins on. (i.e. if you were to go to this line in the file, the first line of the doctest should be on this line). fpath (PathLike): Typically the same as modpath, only specified for non-python files (e.g. rst files). block_type (str | None): Hint indicating the type of docstring block. Can be ('Example', 'Doctest', 'Script', 'Benchmark', 'zero-arg', etc..). mode (str): Hint at what created / is running this doctest. This impacts how results are presented and what doctests are skipped. Can be "native" or "pytest". Defaults to "pytest". config (DoctestConfig): configuration for running / checking the doctest module (ModuleType | None): a reference to the module that contains the doctest modname (str): name of the module that contains the doctest. failed_tb_lineno (int | None): Line number a failure occurred on. exc_info (None | TracebackType): traceback of a failure if one occurred. failed_part (None | DoctestPart): the part containing the failure if one occurred. warn_list (list): from :func:`warnings.catch_warnings` logged_evals (OrderedDict): Mapping from part index to what they evaluated to (if anything) logged_stdout (OrderedDict): Mapping from part index to captured stdout. global_namespace (dict): globals visible to the doctest CommandLine: xdoctest -m xdoctest.doctest_example DocTest Example: >>> from xdoctest import core >>> from xdoctest import doctest_example >>> import os >>> modpath = doctest_example.__file__.replace('.pyc', '.py') >>> modpath = os.path.realpath(modpath) >>> testables = core.parse_doctestables(modpath) >>> for test in testables: >>> if test.callname == 'DocTest': >>> self = test >>> break >>> assert self.num == 0 >>> assert self.modpath == modpath >>> print(self) """ # Constant values for unknown attributes UNKNOWN_MODNAME = '' UNKNOWN_MODPATH = '' UNKNOWN_CALLNAME = '' UNKNOWN_FPATH = '' def __init__(self, docsrc, modpath=None, callname=None, num=0, lineno=1, fpath=None, block_type=None, mode='pytest'): """ Args: docsrc (str): the text of the doctest modpath (str | PathLike | None): callname (str | None): num (int): lineno (int): fpath (str | None): block_type (str | None): mode (str): """ # if we know the google block type it is recorded self.block_type = block_type self.config = DoctestConfig() self.module = None self.modpath = modpath self.fpath = fpath if modpath is None: self.modname = self.UNKNOWN_MODNAME self.modpath = self.UNKNOWN_MODPATH elif isinstance(modpath, types.ModuleType): self.fpath = modpath self.module = modpath self.modname = modpath.__name__ self.modpath = getattr(self.module, '__file__', self.UNKNOWN_MODPATH) else: if fpath is not None: if fpath != modpath: raise AssertionError( 'only specify fpath for non-python files') self.fpath = modpath self.modname = static.modpath_to_modname(modpath) if callname is None: self.callname = self.UNKNOWN_CALLNAME else: self.callname = callname self.docsrc = docsrc self.lineno = lineno self.num = num self._parts = None self.failed_tb_lineno = None self.exc_info = None self.failed_part = None self.warn_list = None self._partfilename = None self.logged_evals = OrderedDict() self.logged_stdout = OrderedDict() self._unmatched_stdout = [] self._skipped_parts = [] self._runstate = None # Maintain global variables that this test will have access to self.global_namespace = {} # Hint at what is running this doctest self.mode = mode def __nice__(self): """ Returns: str """ parts = [] parts.append(self.modname) parts.append('%s:%s' % (self.callname, self.num)) if self.lineno is not None: parts.append('ln %s' % (self.lineno)) return ' '.join(parts) def __repr__(self): """ Returns: str """ classname = self.__class__.__name__ devnice = self.__nice__() return '<%s(%s) at %s>' % (classname, devnice, hex(id(self))) def __str__(self): """ Returns: str """ classname = self.__class__.__name__ devnice = self.__nice__() return '<%s(%s)>' % (classname, devnice) def is_disabled(self, pytest=False): """ Checks for comment directives on the first line of the doctest A doctest is force-disabled if it starts with any of the following patterns * ``>>> # DISABLE_DOCTEST`` * ``>>> # SCRIPT`` * ``>>> # UNSTABLE`` * ``>>> # FAILING`` And if running in pytest, you can also use * ``>>> import pytest; pytest.skip()`` Note: modern versions of xdoctest contain directives like `# xdoctest: +SKIP`, which are a better way to do this. TODO: Robustly deprecate these non-standard ways of disabling a doctest. Generate a warning for several versions if they are used, and indicate what the replacement strategy is. Then raise an error for several more versions before finally removing this code. Returns: bool: """ disable_patterns = [ r'>>>\s*#\s*DISABLE', r'>>>\s*#\s*UNSTABLE', r'>>>\s*#\s*FAILING', r'>>>\s*#\s*SCRIPT', r'>>>\s*#\s*SLOW_DOCTEST', # r'>>>\s*#\s*x?doctest:\s\+SKIP', ] if pytest: disable_patterns += [ r'>>>\s*#\s*pytest.skip' ] pattern = '|'.join(disable_patterns) m = re.match(pattern, self.docsrc, flags=re.IGNORECASE) return m is not None @property def unique_callname(self): """ A key that references this doctest given its module Returns: str """ return self.callname + ':' + str(self.num) @property def node(self): """ A key that references this doctest within pytest Returns: str """ return self.modpath + '::' + self.callname + ':' + str(self.num) @property def valid_testnames(self): """ A set of callname and unique_callname Returns: Set[str] """ return { self.callname, self.unique_callname, } def wants(self): """ Returns a list of the populated wants Yields: str """ self._parse() for part in self._parts: if part.want: yield part.want def format_parts(self, linenos=True, colored=None, want=True, offset_linenos=None, prefix=True): """ Used by :func:`format_src` Args: linenos (bool): show line numbers colored (bool | None): pygmentize the code want (bool): include the want value if it exists offset_linenos (bool): if True include the line offset relative to the source file prefix (bool): if False, exclude the doctest ``>>> `` prefix """ self._parse() colored = self.config.getvalue('colored', colored) partnos = self.config.getvalue('partnos') offset_linenos = self.config.getvalue('offset_linenos', offset_linenos) n_digits = None startline = 1 if linenos: if offset_linenos: startline = self.lineno n_lines = sum(p.n_lines for p in self._parts) endline = startline + n_lines n_digits = math.log(max(1, endline), 10) n_digits = int(math.ceil(n_digits)) for part in self._parts: part_text = part.format_part(linenos=linenos, want=want, startline=startline, n_digits=n_digits, prefix=prefix, colored=colored, partnos=partnos) yield part_text def format_src(self, linenos=True, colored=None, want=True, offset_linenos=None, prefix=True): """ Adds prefix and line numbers to a doctest Args: linenos (bool): if True, adds line numbers to output colored (bool): if True highlight text with ansi colors. Default is specified in the config. want (bool): if True includes "want" lines (default False). offset_linenos (bool): if True offset line numbers to agree with their position in the source text file (default False). prefix (bool): if False, exclude the doctest ``>>> `` prefix Returns: str Example: >>> from xdoctest.core import * >>> from xdoctest import core >>> testables = parse_doctestables(core.__file__) >>> self = next(testables) >>> self._parse() >>> print(self.format_src()) >>> print(self.format_src(linenos=False, colored=False)) >>> assert not self.is_disabled() """ formated_parts = list(self.format_parts(linenos=linenos, colored=colored, want=want, offset_linenos=offset_linenos, prefix=prefix)) full_source = '\n'.join(formated_parts) return full_source def _parse(self): """ Divide the given string into examples and intervening text. Returns: None Example: >>> s = 'I am a dummy example with three parts' >>> x = 10 >>> print(s) I am a dummy example with three parts >>> s = 'My purpose it so demonstrate how wants work here' >>> print('The new want applies ONLY to stdout') >>> print('given before the last want') >>> ''' this wont hurt the test at all even though its multiline ''' >>> y = 20 The new want applies ONLY to stdout given before the last want >>> # Parts from previous examples are executed in the same context >>> print(x + y) 30 this is simply text, and doesnt apply to the previous doctest the directive is still in effect. Example: >>> from xdoctest import parser >>> from xdoctest.docstr import docscrape_google >>> from xdoctest import doctest_example >>> DocTest = doctest_example.DocTest >>> docstr = DocTest._parse.__doc__ >>> blocks = docscrape_google.split_google_docblocks(docstr) >>> doclineno = DocTest._parse.__code__.co_firstlineno >>> key, (docsrc, offset) = blocks[-2] >>> lineno = doclineno + offset >>> self = DocTest(docsrc, doctest_example.__file__, '_parse', 0, >>> lineno) >>> self._parse() >>> assert len(self._parts) >= 3 >>> #p1, p2, p3 = self._parts >>> self.run() """ if not self._parts: info = dict(callname=self.callname, modpath=self.modpath, lineno=self.lineno, fpath=self.fpath) self._parts = parser.DoctestParser().parse(self.docsrc, info) self._parts = [p for p in self._parts if not isinstance(p, str)] # Ensure part numbers are given for partno, part in enumerate(self._parts): part.partno = partno def _import_module(self): """ After this point we are in dynamic analysis mode, in most cases xdoctest should have been in static-analysis-only mode. Returns: None """ if self.module is None: if not self.modname.startswith('<'): # self.module = utils.import_module_from_path(self.modpath, index=0) if global_state.DEBUG_DOCTEST: print('Pre-importing modpath = {}'.format(self.modpath)) try: # Note: there is a possibility of conflicts that arises # here depending on your local environment. We may want to # try and detect that. self.module = utils.import_module_from_path(self.modpath, index=-1) except RuntimeError as ex: if global_state.DEBUG_DOCTEST: print('sys.path={}'.format(sys.path)) print('Failed to pre-import modpath = {}'.format(self.modpath)) msg_parts = [ ('XDoctest failed to pre-import the module ' 'containing the doctest.') ] msg_parts.append(str(ex)) new_exc = RuntimeError('\n'.join(msg_parts)) if not self.config['supress_import_errors']: raise else: # new_exc = ex # Remove traceback before this line new_exc.__traceback__ = None # Backwards syntax compatible raise exc from None # https://www.python.org/dev/peps/pep-3134/#explicit-exception-chaining new_exc.__cause__ = None raise new_exc else: if global_state.DEBUG_DOCTEST: print('Pre import success: self.module={}'.format(self.module)) @staticmethod def _extract_future_flags(namespace): """ Return the compiler-flags associated with the future features that have been imported into the given namespace (i.e. globals). Returns: int """ compileflags = 0 for key in __future__.all_feature_names: feature = namespace.get(key, None) if feature is getattr(__future__, key): compileflags |= feature.compiler_flag return compileflags def _test_globals(self): test_globals = self.global_namespace if self.module is None: compileflags = 0 else: # Its unclear what the side effects of populating globals with # __name__, __package__, etc are. They do cause differences. # between that and IPython code. Probably regular code too. # https://stackoverflow.com/questions/32175693/python-importlibs-analogue-for-imp-new-module # https://stackoverflow.com/questions/31191947/pickle-and-exec-in-python # import types # dummy_name = self.module.__name__ + '_xdoctest_sandbox' # if dummy_name in sys.modules: # dummy_mod = sys.modules[dummy_name] # else: # dummy_mod = types.ModuleType(dummy_name) # sys.modules[dummy_name] = dummy_mod test_globals.update(self.module.__dict__) # test_globals.update(dummy_mod.__dict__) # importable_attrs = { # k: v for k, v in self.module.__dict__.items() # if not k.startswith('__') # } # test_globals.update(importable_attrs) # test_globals['__name__'] = self.module.__name__ + '.doctest' # test_globals['__name__'] = '__main__' # test_globals['__file__'] = None # test_globals['__package__'] = None compileflags = self._extract_future_flags(test_globals) # force print function and division futures compileflags |= __future__.print_function.compiler_flag compileflags |= __future__.division.compiler_flag compileflags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT return test_globals, compileflags def anything_ran(self): """ Returns: bool """ # If everything was skipped, then there will be no stdout return len(self.logged_stdout) > 0 def run(self, verbose=None, on_error=None): """ Executes the doctest, checks the results, reports the outcome. Args: verbose (int): verbosity level on_error (str): can be 'raise' or 'return' Returns: Dict : summary """ on_error = self.config.getvalue('on_error', on_error) verbose = self.config.getvalue('verbose', verbose) if on_error not in {'raise', 'return'}: raise KeyError(on_error) self._parse() # parse out parts if we have not already done so self._pre_run(verbose) # Prepare for actual test run self.logged_evals.clear() self.logged_stdout.clear() self._unmatched_stdout = [] self._skipped_parts = [] self.exc_info = None self._suppressed_stdout = verbose <= 1 # Initialize a new runtime state default_state = self.config['default_runtime_state'] runstate = self._runstate = directive.RuntimeState(default_state) # setup reporting choice runstate.set_report_style(self.config['reportchoice'].lower()) # Defer the execution of the pre-import until we know at least one part # in the doctest will run. did_pre_import = False # Can't do this because we can't force execution of SCRIPTS # if self.is_disabled(): # runstate['SKIP'] = True needs_capture = True asyncio_runner = None is_running_in_loop = utils.util_asyncio.running() DEBUG = global_state.DEBUG_DOCTEST # Use the same capture object for all parts in the test cap = utils.CaptureStdout(suppress=self._suppressed_stdout, enabled=needs_capture) # NOTE: this will prevent any custom handling of warnings # See: https://github.com/Erotemic/xdoctest/issues/169 with warnings.catch_warnings(record=True) as self.warn_list: for partx, part in enumerate(self._parts): if DEBUG: print(f'part[{partx}] checking') # Prepare to capture stdout and evaluated values self.failed_part = part # Assume part will fail (it may not) got_eval = constants.NOT_EVALED # Extract directives and and update runtime state part_directive = part.directives if DEBUG: print(f'part[{partx}] directives: {part_directive}') try: try: runstate.update(part_directive) except Exception as ex: msg = ( 'Failed to parse directive: {} in {} at line {}. Caused by {}'.format( part_directive, self.fpath, self.lineno + part.line_offset, repr(ex))) raise Exception(msg) except Exception: self.exc_info = sys.exc_info() self.failed_tb_lineno = 1 # is this the directive line? if on_error == 'raise': raise break if DEBUG: print(f'part[{partx}] runstate={runstate}') print(f'runstate._inline_state={runstate._inline_state}') print(f'runstate._global_state={runstate._global_state}') # Handle runtime actions if runstate['SKIP'] or len(runstate['REQUIRES']) > 0: if DEBUG: print(f'part[{partx}] runstate requests skipping') self._skipped_parts.append(part) continue if not part.has_any_code(): if DEBUG: print(f'part[{partx}] No code, skipping') self._skipped_parts.append(part) continue if not did_pre_import: # Execute the pre-import before the first run of # non-skipped code. if DEBUG: print(f'part[{partx}] Importing parent module') try: self._import_module() except Exception: self.failed_part = '' self._partfilename = '' self.exc_info = sys.exc_info() if on_error == 'raise': raise else: summary = self._post_run(verbose) return summary test_globals, compileflags = self._test_globals() if DEBUG: print('Global names = {}'.format(sorted(test_globals.keys()))) global_exec = self.config.getvalue('global_exec') if global_exec: # Hack to make it easier to specify multi-line input on the CLI global_source = utils.codeblock(global_exec.replace('\\n', '\n')) global_code = compile( global_source, mode='exec', filename='', flags=compileflags, dont_inherit=True ) exec(global_code, test_globals) did_pre_import = True try: # Compile code, handle syntax errors # part.compile_mode can be single, exec, or eval. # Typically single is used instead of eval self._partfilename = '' source_text = part.compilable_source() code = compile( source_text, mode=part.compile_mode, filename=self._partfilename, flags=compileflags, dont_inherit=True ) except KeyboardInterrupt: # nocover raise except Exception: raise # self.exc_info = sys.exc_info() # ex_type, ex_value, tb = self.exc_info # self.failed_tb_lineno = tb.tb_lineno # if on_error == 'raise': # raise try: try: # close the asyncio runner (context exit) if asyncio_runner is not None and not runstate['ASYNC']: try: asyncio_runner.close() finally: asyncio_runner = None # Execute the doctest code try: # NOTE: For code passed to eval or exec, there is no # difference between locals and globals. Only pass in # one dict, otherwise there is weird behavior with cap: # We can execute each part using exec or eval. If # a doctest part has `compile_mode=eval` we # expect it to return an object with a repr that # can compared to a "want" statement. # print('part.compile_mode = {!r}'.format(part.compile_mode)) is_coroutine = code.co_flags & CO_COROUTINE == CO_COROUTINE if is_coroutine or runstate['ASYNC']: if is_running_in_loop: raise exceptions.ExistingEventLoopError( "Cannot run async doctests from within a running event loop: %s", part.orig_lines ) if asyncio_runner is None: asyncio_runner = utils.util_asyncio.Runner() async def corofunc(): if is_coroutine: return await eval(code, test_globals) else: return eval(code, test_globals) if part.compile_mode == 'eval': got_eval = asyncio_runner.run(corofunc()) else: asyncio_runner.run(corofunc()) else: if part.compile_mode == 'eval': got_eval = eval(code, test_globals) else: exec(code, test_globals) # Record any standard output and "got_eval" produced by # this doctest_part. self.logged_evals[partx] = got_eval self.logged_stdout[partx] = cap.text except Exception: if part.want: # A failure may be expected if the traceback # matches the part's want statement. exception = sys.exc_info() traceback.format_exception_only(*exception[:2]) exc_got = traceback.format_exception_only(*exception[:2])[-1] want = part.want checker.check_exception(exc_got, want, runstate) else: raise else: """ TODO: [ ] - Delay got-want failure until the end of the doctest. Allow the rest of the code to run. If multiple errors occur, show them both. """ if part.want: got_stdout = cap.text if not runstate['IGNORE_WANT']: part.check(got_stdout, got_eval, runstate, unmatched=self._unmatched_stdout) # Clear unmatched output when a check passes self._unmatched_stdout = [] else: # If a part doesnt have a want allow its output to # be matched by the next part. self._unmatched_stdout.append(cap.text) except BaseException: # close the asyncio runner (base exception) if asyncio_runner is not None: try: asyncio_runner.close() finally: asyncio_runner = None raise else: # close the asyncio runner (top-level await) if asyncio_runner is not None and not runstate['ASYNC']: try: asyncio_runner.close() finally: asyncio_runner = None # Handle anything that could go wrong except KeyboardInterrupt: # nocover raise except (exceptions.ExitTestException, exceptions._pytest.outcomes.Skipped) as ex: if verbose > 0: print('Test gracefully exists on: ex={}'.format(ex)) break except exceptions.ExistingEventLoopError: # When we try to run a doctest with await, but there is # already a running event loop. self.exc_info = sys.exc_info() if on_error == 'raise': raise break except checker.GotWantException: # When the "got", doesn't match the "want" self.exc_info = sys.exc_info() if on_error == 'raise': raise break except checker.ExtractGotReprException as ex: # When we fail to extract the "got" self.exc_info = sys.exc_info() if on_error == 'raise': raise ex.orig_ex break except Exception as _ex_dbg: ex_type, ex_value, tb = sys.exc_info() DEBUG = global_state.DEBUG_DOCTEST if DEBUG: print('_ex_dbg = {!r}'.format(_ex_dbg)) print('', file=sys.stderr) print(''.join(traceback.format_tb(tb)), file=sys.stderr) print('', file=sys.stderr) # Search for the traceback that corresponds with the # doctest, and remove the parts that point to # boilerplate lines in this file. found_lineno = None for sub_tb in _traverse_traceback(tb): tb_filename = sub_tb.tb_frame.f_code.co_filename if tb_filename == self._partfilename: # Walk up the traceback until we find the one that has # the doctest as the base filename found_lineno = sub_tb.tb_lineno break if DEBUG: # The only traceback remaining should be # the part that is relevant to the user print('', file=sys.stderr) print('found_lineno = {!r}'.format(found_lineno), file=sys.stderr) print(''.join(traceback.format_tb(sub_tb)), file=sys.stderr) print('', file=sys.stderr) if found_lineno is None: if DEBUG: print('UNABLE TO CLEAN TRACEBACK. EXIT DUE TO DEBUG') sys.exit(1) raise ValueError('Could not clean traceback: ex = {!r}'.format(_ex_dbg)) else: self.failed_tb_lineno = found_lineno self.exc_info = (ex_type, ex_value, tb) # The idea of CLEAN_TRACEBACK is to make it so the # traceback from this function doesn't clutter the error # message the user sees. if on_error == 'raise': raise break finally: if cap.enabled: assert cap.text is not None # Ensure that we logged the output even in failure cases self.logged_evals[partx] = got_eval self.logged_stdout[partx] = cap.text # close the asyncio runner (no exception) if asyncio_runner is not None: try: asyncio_runner.close() finally: asyncio_runner = None if self.exc_info is None: self.failed_part = None if len(self._skipped_parts) == len(self._parts): # we skipped everything if self.mode == 'pytest': import pytest pytest.skip() summary = self._post_run(verbose) # Clear the global namespace so doctests don't leak memory self.global_namespace.clear() return summary @property def globs(self): """ Alias for ``global_namespace`` for pytest 8.0 compatibility """ return self.global_namespace @property def cmdline(self): """ A cli-instruction that can be used to execute *this* doctest. Returns: str: """ if self.mode == 'pytest': return 'pytest ' + self.node elif self.mode == 'native': ALLOW_MODNAME_CMDLINE = False if ALLOW_MODNAME_CMDLINE: # not 100% reliable if any dynamic code has executed before # or we are doing self-testing in_path = static.is_modname_importable(self.modname) if in_path: # should be able to find the module by name return 'python -m xdoctest ' + self.modname + ' ' + self.unique_callname else: # needs the full path to be able to run the module return 'python -m xdoctest ' + self.modpath + ' ' + self.unique_callname else: # Probably safer to always use the path return 'python -m xdoctest ' + self.modpath + ' ' + self.unique_callname else: raise KeyError(self.mode) @property def _block_prefix(self): return 'ZERO-ARG' if self.block_type == 'zero-arg' else 'DOCTEST' def _pre_run(self, verbose): if verbose >= 1: if verbose >= 2: barrier = self._color('====== ======', 'white') print(barrier) if self.block_type == 'zero-arg': # zero-arg funcs arent doctests, but we can still run them print('* ZERO-ARG FUNC : {}'.format(self.node)) else: print('* DOCTEST : {}, line {}'.format(self.node, self.lineno) + self._color(' <- wrt source file', 'white')) if verbose >= 3: print(self._color(self._block_prefix + ' SOURCE', 'white')) print(self.format_src()) print(self._color(self._block_prefix + ' STDOUT/STDERR', 'white')) def failed_line_offset(self): """ Determine which line in the doctest failed. Returns: int | None """ if self.exc_info is None: return None else: if self.failed_part == '': return 0 ex_type, ex_value, tb = self.exc_info offset = self.failed_part.line_offset if isinstance(ex_value, (checker.ExtractGotReprException, exceptions.ExistingEventLoopError)): # Return the line of the "got" expression offset += self.failed_part.n_exec_lines elif isinstance(ex_value, checker.GotWantException): # Return the line of the want line offset += self.failed_part.n_exec_lines + 1 else: offset += self.failed_tb_lineno offset -= 1 return offset def failed_lineno(self): """ Returns: int | None """ offset = self.failed_line_offset() if offset is None: return None else: # Find the first line of the part lineno = self.lineno + offset return lineno def repr_failure(self, with_tb=True): r""" Constructs lines detailing information about a failed doctest Args: with_tb (bool): if True include the traceback Returns: List[str] CommandLine: python -m xdoctest.core DocTest.repr_failure:0 python -m xdoctest.core DocTest.repr_failure:1 python -m xdoctest.core DocTest.repr_failure:2 Example: >>> from xdoctest.core import * >>> docstr = utils.codeblock( ''' >>> x = 1 >>> print(x + 1) 2 >>> print(x + 3) 3 >>> print(x + 100) 101 ''') >>> parsekw = dict(fpath='foo.txt', callname='bar', lineno=42) >>> self = list(parse_docstr_examples(docstr, **parsekw))[0] >>> summary = self.run(on_error='return', verbose=0) >>> print('[res]' + '\n[res]'.join(self.repr_failure())) Example: >>> from xdoctest.core import * >>> docstr = utils.codeblock( r''' >>> 1 1 >>> print('.▴ .\n.▴ ▴.') # xdoc: -NORMALIZE_WHITESPACE . ▴ . .▴ ▴. ''') >>> parsekw = dict(fpath='foo.txt', callname='bar', lineno=42) >>> self = list(parse_docstr_examples(docstr, **parsekw))[0] >>> summary = self.run(on_error='return', verbose=1) >>> print('[res]' + '\n[res]'.join(self.repr_failure())) Example: >>> from xdoctest.core import * >>> docstr = utils.codeblock( ''' >>> assert True >>> assert False >>> x = 100 ''') >>> self = list(parse_docstr_examples(docstr))[0] >>> summary = self.run(on_error='return', verbose=0) >>> print('[res]' + '\n[res]'.join(self.repr_failure())) """ # '=== LINES ===', # ] # if '--xdoc-debug' in sys.argv: # lines += ['DEBUG PARTS: '] # for partx, part in enumerate(self._parts): # lines += [str(partx) + ': ' + str(part)] # lines += [' directives: {!r}'.format(part.directives)] # lines += [' want: {!r}'.format(str(part.want)[0:25])] # val = self.logged_evals.get(partx, None) # lines += [' eval: ' + repr(val)] # val = self.logged_stdout.get(partx, None) # lines += [' stdout: ' + repr(val)] # partx = self._parts.index(self.failed_part) # lines += [ # 'failed partx = {}'.format(partx) # ] # failed_part = self.failed_part # lines += ['----'] # lines += ['Failed part line offset:'] # lines += ['{}'.format(failed_part.line_offset)] # lines += ['Failed directives:'] # lines += ['{}'.format(list(failed_part.directives))] # lines += ['Failed part source:'] # lines += failed_part.exec_lines # lines += ['Failed part want:'] # if failed_part.want_lines: # lines += failed_part.want_lines # lines += ['Failed part stdout:'] # lines += self.logged_stdout[partx].splitlines() # lines += ['Failed part eval:'] # lines += [repr(self.logged_evals[partx])] # lines += ['----'] # lines += [ # # 'self.module = {}'.format(self.module), # # 'self.modpath = {}'.format(self.modpath), # # 'self.modpath = {}'.format(self.modname), # # 'self.global_namespace = {}'.format(self.global_namespace.keys()), # ] # lines += ['Failed doctest in ' + self.callname] if self.exc_info is None: return [] ex_type, ex_value, tb = self.exc_info # Failure line offset wrt the doctest (starts from 0) fail_offset = self.failed_line_offset() # Failure line number wrt the entire file (starts from 1) fail_lineno = self.failed_lineno() lines = [ '* REASON: {}'.format(ex_type.__name__), self._color(self._block_prefix + ' DEBUG INFO', 'white'), ' XDoc "{}", line {}'.format(self.node, fail_offset + 1) + self._color(' <- wrt doctest', 'red'), ] colored = self.config['colored'] if fail_lineno is not None: fpath = self.UNKNOWN_FPATH if self.fpath is None else self.fpath lines += [' File "{}", line {},'.format(fpath, fail_lineno) + self._color(' <- wrt source file', 'red')] # lines += [' in doctest "{}", line {}'.format(self.unique_callname, # fail_offset + 1) + # self._color(' <- relative line number in the docstest', 'red')] # source_text = self.format_src(colored=colored, linenos=True, # want=False) # source_text = utils.indent(source_text) # lines += source_text.splitlines() def r1_strip_nl(text): if text is None: return None return text[:-1] if text.endswith('\n') else text # if self.logged_stdout: # lines += ['stdout results:'] # lines += [r1_strip_nl(t) for t in self.logged_stdout.values() if t] textgen = self.format_parts(colored=colored, linenos=True, want=False) n_digits = 1 # Logic to break output between pass, failed, and unexecuted parts before_part_lines = [] fail_part_lines = [] after_parts_lines = [] temp = [before_part_lines, fail_part_lines, after_parts_lines] tindex = 0 indent_text = ' ' * (5 + n_digits) for partx, (part, part_text) in enumerate(zip(self._parts, textgen)): if part in self._skipped_parts: # temp[tindex] += [utils.indent(part_text, ' ' * 4)] # temp[tindex] += [utils.indent(' >>> # skipped', indent_text)] continue part_out = r1_strip_nl(self.logged_stdout.get(partx, '')) if part is self.failed_part: tindex += 1 # Append the part source code temp[tindex] += [utils.indent(part_text, ' ' * 4)] # Append the part stdout (if it exists) if part_out: temp[tindex] += [utils.indent(part_out, indent_text)] if part is self.failed_part: tindex += 1 # part_eval = self.logged_evals[partx] # if part_eval is not NOT_EVALED: # temp[tindex] += [repr(part_eval)] lines += [self._color(self._block_prefix + ' PART BREAKDOWN', 'white')] if before_part_lines: lines += ['Passed Parts:'] lines += before_part_lines if fail_part_lines: lines += ['Failed Part:'] lines += fail_part_lines if after_parts_lines: lines += ['Remaining Parts:'] lines += after_parts_lines lines += [self._color(self._block_prefix + ' TRACEBACK', 'white')] if hasattr(ex_value, 'output_difference'): lines += [ ex_value.output_difference(self._runstate, colored=colored), ex_value.output_repr_difference(self._runstate) ] else: if with_tb: # TODO: enhance formatting to show an IPython-like output of # where the error occurred in the doctest tblines = traceback.format_exception(*self.exc_info) def _alter_traceback_linenos(self, tblines): def overwrite_lineno(linepart): # Replace the trailing part which is the lineno old_linestr = linepart[-1] # noqa # This is the lineno we will insert rel_lineno = self.failed_part.line_offset + tb_lineno abs_lineno = self.lineno + rel_lineno - 1 new_linestr = 'rel: {rel}, abs: {abs}'.format( rel=rel_lineno, abs=abs_lineno, ) linepart = linepart[:-1] + [new_linestr] return linepart new_tblines = [] for i, line in enumerate(tblines): # if '= 2: print(self._color(self._block_prefix + ' RESULT', 'white')) if self.exc_info is None: if verbose >= 1: if verbose >= 2: if self._suppressed_stdout: self._print_captured() if skipped: success = self._color('SKIPPED', 'yellow') else: success = self._color('SUCCESS', 'green') print('* {}: {}'.format(success, self.node)) else: if verbose >= 1: failure = self._color('FAILURE', 'red') print('* {}: {}'.format(failure, self.node)) if verbose >= 2: lines = self.repr_failure() text = '\n'.join(lines) print(text) if verbose >= 2: barrier = self._color('====== ======', 'white') print(barrier) return summary def _traverse_traceback(tb): # Lives down here to avoid issue calling exec in a function that contains a # nested function with free variable. Not sure how necessary this is # because this doesn't have free variables. sub_tb = tb yield sub_tb while sub_tb.tb_next is not None: sub_tb = sub_tb.tb_next yield sub_tb if __name__ == '__main__': r""" CommandLine: python -m xdoctest.doctest_example """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/src/xdoctest/doctest_example.pyi000066400000000000000000000053451505122333300240650ustar00rootroot00000000000000from typing import Any from os import PathLike from types import ModuleType from types import TracebackType from typing import Dict from collections import OrderedDict from typing import Set from typing import List from collections.abc import Generator from xdoctest.doctest_part import DoctestPart __devnotes__: str __docstubs__: str class DoctestConfig(dict): def __init__(self, *args, **kwargs) -> None: ... def getvalue(self, key: str, given: Any | None = None) -> Any: ... class DocTest: UNKNOWN_MODNAME: str UNKNOWN_MODPATH: str UNKNOWN_CALLNAME: str UNKNOWN_FPATH: str docsrc: str modpath: str | PathLike callname: str num: int lineno: int fpath: PathLike block_type: str | None mode: str config: DoctestConfig module: ModuleType | None modname: str failed_tb_lineno: int | None exc_info: None | TracebackType failed_part: None | DoctestPart warn_list: list logged_evals: OrderedDict logged_stdout: OrderedDict global_namespace: dict def __init__(self, docsrc: str, modpath: str | PathLike | None = None, callname: str | None = None, num: int = 0, lineno: int = 1, fpath: str | None = None, block_type: str | None = None, mode: str = 'pytest') -> None: ... def __nice__(self) -> str: ... def is_disabled(self, pytest: bool = ...) -> bool: ... @property def unique_callname(self) -> str: ... @property def node(self) -> str: ... @property def valid_testnames(self) -> Set[str]: ... def wants(self) -> Generator[str, None, None]: ... def format_parts(self, linenos: bool = True, colored: bool | None = None, want: bool = True, offset_linenos: bool | None = None, prefix: bool = True) -> Generator[Any, None, None]: ... def format_src(self, linenos: bool = True, colored: bool | None = None, want: bool = True, offset_linenos: bool | None = None, prefix: bool = True) -> str: ... def anything_ran(self) -> bool: ... def run(self, verbose: int | None = None, on_error: str | None = None) -> Dict: ... @property def cmdline(self) -> str: ... def failed_line_offset(self) -> int | None: ... def failed_lineno(self) -> int | None: ... def repr_failure(self, with_tb: bool = True) -> List[str]: ... Erotemic-xdoctest-fac8308/src/xdoctest/doctest_part.py000066400000000000000000000270201505122333300232210ustar00rootroot00000000000000""" Simple storage container used to store a single executable part of a doctest example. Multiple parts are typically stored in a :class:`xdoctest.doctest_example.Doctest`, which manages execution of each part. """ import math from xdoctest import utils from xdoctest import checker from xdoctest import directive from xdoctest import constants __devnotes__ = """ TODO: perhaps rename doctest part to DoctestCell, because there is a striking similarity between Jupyter notebook cells and doctest parts. """ class DoctestPart: """ The result of parsing that represents a "logical block" of code. If a want statement is defined, it is stored here. Attributes: exec_lines (List[str]): executable lines in this part want_lines (List[str] | None): lines that the result of the execution should match line_offset (int): line number relative to the start of the doctest orig_lines (List[str] | None): the original text parsed into exec and want directives (list | None): directives that this part will apply before being run partno (int | None): identifies the part number in the larger example compile_mode (str): mode passed to compile. """ def __init__(self, exec_lines, want_lines=None, line_offset=0, orig_lines=None, directives=None, partno=None): """ Args: exec_lines (List[str]): executable lines in this part want_lines (List[str] | None): lines that the result of the execution should match line_offset (int): line number relative to the start of the doctest orig_lines (List[str] | None): The original text parsed into exec and want. This is only used in formatting and may be removed in the future. directives (list | None): directives that this part will apply before being run. If unspecified, these will be extracted. partno (int | None): identifies the part number in the larger example """ self.exec_lines = exec_lines self.want_lines = want_lines self.line_offset = line_offset self.orig_lines = orig_lines self._directives = directives self.partno = partno self.compile_mode = 'exec' @property def n_lines(self): """ Returns: int: number of lines in the entire source (i.e. exec + want) """ return self.n_exec_lines + self.n_want_lines @property def n_exec_lines(self): """ Returns: int: number of executable lines (excluding want) """ return len(self.exec_lines) @property def n_want_lines(self): """ Returns: int: number of lines in the "want" statement. """ if self.want_lines: return len(self.want_lines) else: return 0 @property def source(self): """ Returns: str: A single block of text representing the source code. """ return '\n'.join(self.exec_lines) def compilable_source(self): """ Use this to build the string for compile. Takes care of a corner case. Returns: str """ if self.compile_mode == 'single': return '\n'.join(self.exec_lines + ['']) else: return '\n'.join(self.exec_lines) def has_any_code(self): """ Heuristic to check if there is any runnable code in this doctest. We currently just check that not every line is a comment, which helps the runner count a test as skipped if only lines with comments "ran". Returns: bool """ slines = [line.strip() for line in self.exec_lines] return not all( not line or line.startswith('#') for line in slines ) @property def directives(self): """ Returns: List[directive.Directive]: The extracted or provided directives to be applied. Example: >>> self = DoctestPart(['# doctest: +SKIP'], None, 0) >>> print(', '.join(list(map(str, self.directives)))) """ if self._directives is None: self._directives = list(directive.Directive.extract(self.source)) return self._directives @property def want(self): """ Returns: str | None: what the test is expected to produce """ if self.want_lines: return '\n'.join(self.want_lines) else: return None def __nice__(self): """ Returns: str: a pretty and concise "nice" representation """ parts = [] if self.line_offset is not None: parts.append('ln %s' % (self.line_offset)) if self.source: head_src = self.source.splitlines()[0][0:8] parts.append('src="%s..."' % (head_src,)) else: parts.append('src=""') if self.want is None: parts.append('want=None') else: head_wnt = self.want.splitlines()[0][0:8] parts.append('want="%s..."' % (head_wnt,)) return ', '.join(parts) def __repr__(self): classname = self.__class__.__name__ devnice = self.__nice__() return '<%s(%s) at %s>' % (classname, devnice, hex(id(self))) def __str__(self): classname = self.__class__.__name__ devnice = self.__nice__() return '<%s(%s)>' % (classname, devnice) def check(part, got_stdout, got_eval=constants.NOT_EVALED, runstate=None, unmatched=None): r""" Check if the "got" output obtained by running this test matches the "want" target. Note there are two types of "got" output: (1) output from stdout and (2) evaled output. If both are specified, then want may match either value. Args: got_stdout (str): output from stdout got_eval (str): output from an eval statement runstate (directive.RuntimeState): runner options unmatched (list): if specified, the want statement is allowed to match any trailing sequence of unmatched output and got_stdout from this doctest part. Raises: xdoctest.checker.GotWantException - If the "got" differs from this parts want. Example: >>> # xdoctest: +REQUIRES(module:pytest) >>> import pytest >>> got_stdout = 'more text\n' >>> unmatched = ['some text\n'] >>> self = DoctestPart(None, want_lines=['some text', 'more text']) >>> self.check(got_stdout, unmatched=unmatched) >>> # Leading junk doesnt matter if we match a trailing sequence >>> self.check(got_stdout, unmatched=['junk\n'] + unmatched) >>> # fail when want doesnt match any trailing sequence >>> with pytest.raises(checker.GotWantException): >>> self.check(got_stdout) >>> with pytest.raises(checker.GotWantException): >>> self.check(got_stdout, ['some text\n', 'junk\n']) """ if unmatched is None: unmatched = [] trailing_gots = unmatched + [got_stdout] success = False exceptions = [] for i in range(1, len(trailing_gots) + 1): # Try the i-th trailing sequence got_ = ''.join(trailing_gots[-i:]) try: checker.check_got_vs_want(part.want, got_, got_eval, runstate) except checker.GotWantException as ex: exceptions.append(ex) else: success = True break if not success: # for ex in exceptions: # print(ex.output_difference()) # print(ex.output_repr_difference()) # If none of the checks pass, return the error message with the # largest got message. (perhaps the output with the closest edit # distance might be better to report?) raise exceptions[-1] def format_part(self, linenos=True, want=True, startline=1, n_digits=None, colored=False, partnos=False, prefix=True): """ Customizable formatting of the source and want for this doctest. Args: linenos (bool): show line numbers want (bool): include the want value if it exists startline (int): offsets the line numbering n_digits (int): number of digits to use for line numbers colored (bool): pygmentize the code partnos (bool): if True, shows the part number in the string prefix (bool): if False, exclude the doctest ``>>> `` prefix Returns: str: pretty text suitable for printing CommandLine: python -m xdoctest.doctest_part DoctestPart.format_part Example: >>> from xdoctest.parser import * >>> self = DoctestPart(exec_lines=['print(123)'], >>> want_lines=['123'], line_offset=0, partno=1) >>> # xdoctest: -NORMALIZE_WHITESPACE >>> print(self.format_part(partnos=True)) (p1) 1 >>> print(123) 123 Example: >>> from xdoctest.parser import * >>> self = DoctestPart(exec_lines=['print(123)'], >>> want_lines=['123'], line_offset=0, partno=1) >>> # xdoctest: -NORMALIZE_WHITESPACE >>> print(self.format_part(partnos=False, prefix=False, >>> linenos=False, want=False)) print(123) """ if prefix: # Show the original line prefix when possible if self.orig_lines is None: src_text = utils.indent(self.source, '>>> ') else: src_text = '\n'.join(self.orig_lines) else: src_text = self.source want_text = self.want if self.want else '' if n_digits is None: endline = startline + self.n_lines n_digits = math.log(max(1, endline), 10) n_digits = int(math.ceil(n_digits)) part_lines = src_text.splitlines() n_spaces = 0 if linenos: n_spaces += n_digits + 1 start = startline + self.line_offset part_lines = utils.add_line_numbers(part_lines, n_digits=n_digits, start=start) if partnos: part_lines = [ '(p{}) {}'.format(self.partno, line) for line in part_lines ] n_spaces += 4 + 1 # FIXME could be more robust if more than 9 parts want_lines = [] if want_text: want_fmt = ' ' * n_spaces + '{line}' for line in want_text.splitlines(): if want: want_lines.append(want_fmt.format(line=line)) part_text = '\n'.join(part_lines) want_text = '\n'.join(want_lines) if colored: part_text = utils.highlight_code(part_text, 'python') want_text = utils.color_text(want_text, 'green') if want_lines: part_text += '\n' + want_text return part_text if __name__ == '__main__': """ CommandLine: python -m xdoctest.doctest_part """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/src/xdoctest/doctest_part.pyi000066400000000000000000000031771505122333300234010ustar00rootroot00000000000000from typing import List from xdoctest import directive __devnotes__: str class DoctestPart: exec_lines: List[str] want_lines: List[str] | None line_offset: int orig_lines: List[str] | None partno: int | None compile_mode: str def __init__(self, exec_lines: List[str], want_lines: List[str] | None = None, line_offset: int = 0, orig_lines: List[str] | None = None, directives: list | None = None, partno: int | None = None) -> None: ... @property def n_lines(self) -> int: ... @property def n_exec_lines(self) -> int: ... @property def n_want_lines(self) -> int: ... @property def source(self) -> str: ... def compilable_source(self) -> str: ... def has_any_code(self) -> bool: ... @property def directives(self) -> List[directive.Directive]: ... @property def want(self) -> str | None: ... def __nice__(self) -> str: ... def check(part, got_stdout: str, got_eval: str = ..., runstate: directive.RuntimeState | None = None, unmatched: list | None = None) -> None: ... def format_part(self, linenos: bool = True, want: bool = True, startline: int = 1, n_digits: int | None = None, colored: bool = False, partnos: bool = False, prefix: bool = True) -> str: ... Erotemic-xdoctest-fac8308/src/xdoctest/dynamic_analysis.py000066400000000000000000000234031505122333300240560ustar00rootroot00000000000000""" Utilities for dynamically inspecting code """ import inspect import os import types def parse_dynamic_calldefs(modpath_or_module): """ Dynamic parsing of module doctestable items. Unlike static parsing this forces execution of the module code before test-time, however the former is limited to plain-text python files whereas this can discover doctests in binary extension libraries. Args: modpath_or_module (str | PathLike | ModuleType): path to module or the module itself Returns: Dict[str, xdoctest.static_analysis.CallDefNode]: mapping from callnames to CallDefNodes, which contain info about the item with the doctest. CommandLine: python -m xdoctest.dynamic_analysis parse_dynamic_calldefs Example: >>> from xdoctest import dynamic_analysis >>> module = dynamic_analysis >>> calldefs = parse_dynamic_calldefs(module.__file__) >>> for key, calldef in sorted(calldefs.items()): ... print('key = {!r}'.format(key)) ... print(' * calldef.callname = {}'.format(calldef.callname)) ... if calldef.docstr is None: ... print(' * len(calldef.docstr) = {}'.format(calldef.docstr)) ... else: ... print(' * len(calldef.docstr) = {}'.format(len(calldef.docstr))) """ from xdoctest import static_analysis as static import types if isinstance(modpath_or_module, types.ModuleType): module = modpath_or_module else: modpath = modpath_or_module if modpath.endswith('.ipynb'): """ # Devnote: modpath = ub.expandpath("~/code/xdoctest/tests/notebook_with_doctests.ipynb") xdoctest ~/code/xdoctest/tests/notebook_with_doctests.ipynb """ from xdoctest.utils import util_notebook module = util_notebook.import_notebook_from_path(modpath) else: # Possible option for dynamic parsing from xdoctest.utils import util_import module = util_import.import_module_from_path(modpath) calldefs = {} if getattr(module, '__doc__'): calldefs['__doc__'] = static.CallDefNode( callname='__doc__', docstr=module.__doc__, lineno=0, doclineno=1, doclineno_end=1, args=None ) for key, val in iter_module_doctestables(module): # if hasattr(val, '__doc__'): if hasattr(val, '__doc__') and hasattr(val, '__name__'): calldefs[key] = static.CallDefNode( callname=val.__name__, docstr=val.__doc__, lineno=0, doclineno=1, doclineno_end=1, args=None ) return calldefs def get_stack_frame(n=0, strict=True): """ Gets the current stack frame or any of its ancestors dynamically Args: n (int): n=0 means the frame you called this function in. n=1 is the parent frame. strict (bool): (default = True) Returns: FrameType: frame_cur Example: >>> frame_cur = get_stack_frame(n=0) >>> print('frame_cur = %r' % (frame_cur,)) >>> assert frame_cur.f_globals['frame_cur'] is frame_cur """ frame_cur = inspect.currentframe() # Use n+1 to always skip the frame of this function for ix in range(n + 1): frame_next = frame_cur.f_back if frame_next is None: # nocover if strict: raise AssertionError('Frame level %r is root' % ix) else: break frame_cur = frame_next return frame_cur def get_parent_frame(n=0): """ Returns the frame of that called you. This is equivalent to `get_stack_frame(n=1)` Args: n (int): n=0 means the frame you called this function in. n=1 is the parent frame. Returns: FrameType: parent_frame Example: >>> root0 = get_stack_frame(n=0) >>> def foo(): >>> child = get_stack_frame(n=0) >>> root1 = get_parent_frame(n=0) >>> root2 = get_stack_frame(n=1) >>> return child, root1, root2 >>> # Note this wont work in IPython because several >>> # frames will be inserted between here and foo >>> child, root1, root2 = foo() >>> print('root0 = %r' % (root0,)) >>> print('root1 = %r' % (root1,)) >>> print('root2 = %r' % (root2,)) >>> print('child = %r' % (child,)) >>> assert root0 == root1 >>> assert root1 == root2 >>> assert child != root1 """ parent_frame = get_stack_frame(n=n + 2) return parent_frame def iter_module_doctestables(module): r""" Yields doctestable objects that belong to a live python module Args: module (ModuleType): live python module Yields: Tuple[str, callable]: (funcname, func) doctestable CommandLine: python -m xdoctest.dynamic_analysis iter_module_doctestables Example: >>> from xdoctest import dynamic_analysis >>> module = dynamic_analysis >>> doctestable_list = list(iter_module_doctestables(module)) >>> items = sorted([str(item) for item in doctestable_list]) >>> print('[' + '\n'.join(items) + ']') """ valid_func_types = ( types.FunctionType, types.BuiltinFunctionType, types.MethodType, classmethod, staticmethod, property, ) def _recurse(item, module): return is_defined_by_module(item, module) for key, val in module.__dict__.items(): if isinstance(val, valid_func_types): if not _recurse(val, module): continue yield key, val elif isinstance(val, type): if not _recurse(val, module): continue # Yield the class itself yield key, val # Yield methods of the class for subkey, subval in val.__dict__.items(): # Unbound methods are still typed as functions if isinstance(subval, valid_func_types): if not _recurse(subval, module): continue # unpack underlying function if isinstance(subval, property): item = subval.fget elif isinstance(subval, staticmethod): item = subval.__func__ elif isinstance(subval, classmethod): item = subval.__func__ else: item = subval yield key + '.' + subkey, item def is_defined_by_module(item, module): """ Check if item is directly defined by a module. This check may not always work, especially for decorated functions. Args: item (object): item to check module (ModuleType): module to check against CommandLine: xdoctest -m xdoctest.dynamic_analysis is_defined_by_module Example: >>> from xdoctest import dynamic_analysis >>> item = dynamic_analysis.is_defined_by_module >>> module = dynamic_analysis >>> assert is_defined_by_module(item, module) >>> item = dynamic_analysis.inspect >>> assert not is_defined_by_module(item, module) >>> item = dynamic_analysis.inspect.ismodule >>> assert not is_defined_by_module(item, module) >>> assert not is_defined_by_module(print, module) >>> # xdoctest: +REQUIRES(CPython) >>> import _ctypes >>> item = _ctypes.Array >>> module = _ctypes >>> assert is_defined_by_module(item, module) >>> item = _ctypes.CFuncPtr.restype >>> module = _ctypes >>> assert is_defined_by_module(item, module) """ from xdoctest import static_analysis as static target_modname = module.__name__ # invalid_types = (int, float, list, tuple, set) # if isinstance(item, invalid_types) or isinstance(item, str): # raise TypeError('can only test definitions for classes and functions') flag = False if isinstance(item, types.ModuleType): if not hasattr(item, '__file__'): try: # hack for cv2 and xfeatures2d name = static.modpath_to_modname(module.__file__) flag = name in str(item) except Exception: flag = False else: item_modpath = os.path.realpath(os.path.dirname(item.__file__)) mod_fpath = module.__file__.replace('.pyc', '.py') if not mod_fpath.endswith('__init__.py'): flag = False else: modpath = os.path.realpath(os.path.dirname(mod_fpath)) modpath = modpath.replace('.pyc', '.py') flag = item_modpath.startswith(modpath) else: # unwrap static/class/property methods if isinstance(item, property): item = item.fget if isinstance(item, staticmethod): item = item.__func__ if isinstance(item, classmethod): item = item.__func__ if getattr(item, '__module__', None) == target_modname: flag = True elif hasattr(item, '__objclass__'): # should we just unwrap objclass? parent = item.__objclass__ if getattr(parent, '__module__', None) == target_modname: flag = True if not flag: try: item_modname = item.__globals__['__name__'] if item_modname == target_modname: flag = True except AttributeError: pass return flag if __name__ == '__main__': import xdoctest as xdoc xdoc.doctest_module() Erotemic-xdoctest-fac8308/src/xdoctest/dynamic_analysis.pyi000066400000000000000000000012711505122333300242260ustar00rootroot00000000000000from os import PathLike from types import ModuleType from typing import Dict import xdoctest from types import FrameType from typing import Callable from typing import Tuple from collections.abc import Generator from typing import Any def parse_dynamic_calldefs( modpath_or_module: str | PathLike | ModuleType ) -> Dict[str, xdoctest.static_analysis.CallDefNode]: ... def get_stack_frame(n: int = 0, strict: bool = True) -> FrameType: ... def get_parent_frame(n: int = 0) -> FrameType: ... def iter_module_doctestables( module: ModuleType) -> Generator[Tuple[str, Callable], None, Any]: ... def is_defined_by_module(item: object, module: ModuleType): ... Erotemic-xdoctest-fac8308/src/xdoctest/exceptions.py000066400000000000000000000025741505122333300227160ustar00rootroot00000000000000""" Define errors that may be raised by xdoctest """ class MalformedDocstr(Exception): """ Exception raised when the docstring itself does not conform to the expected style (e.g. google / numpy). """ class ExistingEventLoopError(Exception): """ Exception raised when the docstring uses a top level await and the test is already running in an event loop. """ class DoctestParseError(Exception): """ Exception raised when doctest code has an error. """ def __init__(self, msg, string=None, info=None, orig_ex=None): """ Args: msg (str): error message string (str | None): the string that failed info (Any | None): extra information orig_ex (Exception | None): The underlying exceptoin """ super(DoctestParseError, self).__init__(msg) self.msg = msg self.string = string self.info = info self.orig_ex = orig_ex class ExitTestException(Exception): pass class IncompleteParseError(SyntaxError): """ Used when something goes wrong in the xdoctest parser """ try: import _pytest import _pytest.outcomes except ImportError: # nocover # Define dummy skipped exception if pytest is not available class _pytest: # type: ignore class outcomes: class Skipped(Exception): pass Erotemic-xdoctest-fac8308/src/xdoctest/exceptions.pyi000066400000000000000000000011741505122333300230620ustar00rootroot00000000000000from typing import Any class MalformedDocstr(Exception): ... class ExistingEventLoopError(Exception): ... class DoctestParseError(Exception): msg: str string: str | None info: Any | None orig_ex: Exception | None def __init__(self, msg: str, string: str | None = None, info: Any | None = None, orig_ex: Exception | None = None) -> None: ... class ExitTestException(Exception): ... class IncompleteParseError(SyntaxError): ... class _pytest: class outcomes: class Skipped(Exception): ... Erotemic-xdoctest-fac8308/src/xdoctest/global_state.py000066400000000000000000000012521505122333300231650ustar00rootroot00000000000000""" Global state initialized at import time. Used for hidden arguments and developer features. """ import os import sys def _boolean_environ(key): """ Args: key (str) Returns: bool """ value = os.environ.get(key, '').lower() TRUTHY_ENVIRONS = {'true', 'on', 'yes', '1'} return value in TRUTHY_ENVIRONS DEBUG = _boolean_environ('XDOCTEST_DEBUG') or '--debug' in sys.argv DEBUG_PARSER = DEBUG or _boolean_environ('XDOCTEST_DEBUG_PARSER') DEBUG_CORE = DEBUG or _boolean_environ('XDOCTEST_DEBUG_CORE') DEBUG_RUNNER = DEBUG or _boolean_environ('XDOCTEST_DEBUG_RUNNER') DEBUG_DOCTEST = DEBUG or _boolean_environ('XDOCTEST_DEBUG_DOCTEST') Erotemic-xdoctest-fac8308/src/xdoctest/global_state.pyi000066400000000000000000000002271505122333300233370ustar00rootroot00000000000000from _typeshed import Incomplete DEBUG: Incomplete DEBUG_PARSER: Incomplete DEBUG_CORE: Incomplete DEBUG_RUNNER: Incomplete DEBUG_DOCTEST: Incomplete Erotemic-xdoctest-fac8308/src/xdoctest/parser.py000066400000000000000000001173361505122333300220340ustar00rootroot00000000000000""" The XDoctest Parser ------------------- This parses a docstring into one or more "doctest part" *after* the docstrings have been extracted from the source code by either static or dynamic means. Terms and definitions: logical block: a snippet of code that can be executed by itself if given the correct global / local variable context. PS1: The original meaning is "Prompt String 1". For details see: [SE32096]_ [BashPS1]_ [CustomPrompt]_ [GeekPrompt]_. In the context of xdoctest, instead of referring to the prompt prefix, we use PS1 to refer to a line that starts a "logical block" of code. In the original doctest module these all had to be prefixed with ">>>". In xdoctest the prefix is used to simply denote the code is part of a doctest. It does not necessarily mean a new "logical block" is starting. PS2: The original meaning is "Prompt String 2". In the context of xdoctest, instead of referring to the prompt prefix, we use PS2 to refer to a line that continues a "logical block" of code. In the original doctest module these all had to be prefixed with "...". However, xdoctest uses parsing to automatically determine this. want statement: Lines directly after a logical block of code in a doctest indicating the desired result of executing the previous block. While I do believe this AST-based code is a significant improvement over the RE-based builtin doctest parser, I acknowledge that I'm not an AST expert and there is room for improvement here. References: .. [SE32096] https://unix.stackexchange.com/questions/32096/why-is-bashs-prompt-variable-called-ps1 .. [BashPS1] https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html#index-PS1 .. [CustomPrompt] https://wiki.archlinux.org/title/Bash/Prompt_customization .. [GeekPrompt] https://web.archive.org/web/20230824025647/https://www.thegeekstuff.com/2008/09/bash-shell-take-control-of-ps1-ps2-ps3-ps4-and-prompt_command/ """ import ast import sys import re import tokenize from xdoctest import utils from xdoctest import directive from xdoctest import exceptions from xdoctest import doctest_part from xdoctest import static_analysis as static from xdoctest import global_state INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE) class DoctestParser: r""" Breaks docstrings into parts using the `parse` method. Example: >>> from xdoctest.parser import * # NOQA >>> parser = DoctestParser() >>> doctest_parts = parser.parse( >>> ''' >>> >>> j = 0 >>> >>> for i in range(10): >>> >>> j += 1 >>> >>> print(j) >>> 10 >>> '''.lstrip('\n')) >>> print('\n'.join(list(map(str, doctest_parts)))) Example: >>> # Having multiline strings in doctests can be nice >>> string = utils.codeblock( ''' >>> name = 'name' 'anything' ''') >>> self = DoctestParser() >>> doctest_parts = self.parse(string) >>> print('\n'.join(list(map(str, doctest_parts)))) """ def __init__(self, simulate_repl=False): """ Args: simulate_repl (bool): if True each line will be treated as its own doctest. This more closely mimics the original doctest module. Defaults to False. """ self.simulate_repl = simulate_repl def parse(self, string, info=None): r""" Divide the given string into examples and interleaving text. Args: string (str): The docstring that may contain one or more doctests. info (dict | None): info about where the string came from in case of an error Returns: List[xdoctest.doctest_part.DoctestPart | str]: a list of `DoctestPart` objects and intervening text in the input docstring. CommandLine: python -m xdoctest.parser DoctestParser.parse Example: >>> docstr = ''' >>> A simple docstring contains text followed by an example. >>> >>> numbers = [1, 2, 3, 4] >>> >>> thirds = [x / 3 for x in numbers] >>> >>> print(thirds) >>> [0.33 0.66 1 1.33] >>> ''' >>> from xdoctest import parser >>> self = parser.DoctestParser() >>> results = self.parse(docstr) >>> assert len(results) == 3 >>> for index, result in enumerate(results): >>> print(f'results[{index}] = {result!r}') results[0] = '\nA simple docstring contains text followed by an example.' results[1] = results[2] = Example: >>> s = 'I am a dummy example with two parts' >>> x = 10 >>> print(s) I am a dummy example with two parts >>> s = 'My purpose it so demonstrate how wants work here' >>> print('The new want applies ONLY to stdout') >>> print('given before the last want') >>> ''' this wont hurt the test at all even though its multiline ''' >>> y = 20 The new want applies ONLY to stdout given before the last want >>> # Parts from previous examples are executed in the same context >>> print(x + y) 30 this is simply text, and doesnt apply to the previous doctest the directive is still in effect. Example: >>> from xdoctest.parser import * # NOQA >>> from xdoctest import parser >>> from xdoctest.docstr import docscrape_google >>> from xdoctest import core >>> self = parser.DoctestParser() >>> docstr = self.parse.__doc__ >>> blocks = docscrape_google.split_google_docblocks(docstr) >>> doclineno = self.parse.__func__.__code__.co_firstlineno >>> key, (string, offset) = blocks[-2] >>> self._label_docsrc_lines(string) >>> doctest_parts = self.parse(string) >>> # each part with a want-string needs to be broken in two >>> assert len(doctest_parts) == 6 >>> len(doctest_parts) """ if global_state.DEBUG_PARSER > 1: print('\n===== PARSE ====') if sys.version_info.major == 2: # nocover string = utils.ensure_unicode(string) if not isinstance(string, str): raise TypeError('Expected string but got {!r}'.format(string)) # If all lines begin with the same indentation, then strip it. min_indent = _min_indentation(string) if min_indent > 0: string = '\n'.join([ln[min_indent:] for ln in string.splitlines()]) labeled_lines = None grouped_lines = None all_parts = None try: labeled_lines = self._label_docsrc_lines(string) grouped_lines = self._group_labeled_lines(labeled_lines) all_parts = list(self._package_groups(grouped_lines)) except Exception as orig_ex: if labeled_lines is None: failpoint = '_label_docsrc_lines' elif grouped_lines is None: failpoint = '_group_labeled_lines' elif all_parts is None: failpoint = '_package_groups' if global_state.DEBUG_PARSER: print('') print('!!! FAILED !!!') print('failpoint = {!r}'.format(failpoint)) import ubelt as ub import traceback tb_text = traceback.format_exc() tb_text = ub.highlight_code(tb_text) tb_text = ub.indent(tb_text) print(tb_text) print('Failed to parse string = <{[<{[<{[ # xdoc debug') print(string) print(']}>]}>]}> # xdoc debug end string') print('info = {}'.format(ub.repr2(info))) print('-----') print('orig_ex = {}'.format(orig_ex)) print('labeled_lines = {}'.format(ub.repr2(labeled_lines))) print('grouped_lines = {}'.format(ub.repr2(grouped_lines, nl=3))) print('all_parts = {}'.format(ub.repr2(all_parts))) print('') # sys.exit(1) raise exceptions.DoctestParseError( 'Failed to parse doctest in {}'.format(failpoint), string=string, info=info, orig_ex=orig_ex) if global_state.DEBUG_PARSER > 1: print('\n===== FINISHED PARSE ====') return all_parts def _package_groups(self, grouped_lines): if global_state.DEBUG_PARSER > 1: import ubelt as ub print('') print('grouped_lines = {}'.format(ub.repr2(grouped_lines, nl=2))) lineno = 0 for chunk in grouped_lines: if isinstance(chunk, tuple): slines, wlines = chunk for example in self._package_chunk(slines, wlines, lineno): yield example lineno += len(slines) + len(wlines) else: text_part = '\n'.join(chunk) yield text_part lineno += len(chunk) if global_state.DEBUG_PARSER > 1: print('') def _package_chunk(self, raw_source_lines, raw_want_lines, lineno=0): """ if `self.simulate_repl` is True, then each statement is broken into its own part. Otherwise, statements are grouped by the closest `want` statement. TODO: - [ ] EXCEPT IN CASES OF EXPLICIT CONTINUATION Example: >>> from xdoctest.parser import * >>> raw_source_lines = ['>>> "string"'] >>> raw_want_lines = ['string'] >>> self = DoctestParser() >>> part, = self._package_chunk(raw_source_lines, raw_want_lines) >>> part.source '"string"' >>> part.want 'string' """ if global_state.DEBUG_PARSER > 1: print('') match = INDENT_RE.search(raw_source_lines[0]) line_indent = 0 if match is None else (match.end() - match.start()) source_lines = [p[line_indent:] for p in raw_source_lines] want_lines = [p[line_indent:] for p in raw_want_lines] # TODO: # - [ ] Fix pytorch indentation issue here exec_source_lines = [p[4:] for p in source_lines] if global_state.DEBUG_PARSER > 1: print(' * locate ps1 lines') # Find the line number of each standalone statement ps1_linenos, mode_hint = self._locate_ps1_linenos(source_lines) if global_state.DEBUG_PARSER > 1: print('mode_hint = {!r}'.format(mode_hint)) print(' * located ps1 lines') print(f'ps1_linenos={ps1_linenos}') # Find all directives here: # A directive necessarily will split a doctest into multiple parts # There are two types: block directives and inline-directives # * Block directives must exist on their own PS1 line # * Block directives insert a breakpoint before # * Inline directives may be on a PS1 or PS2 line # * Inline directives inserts a breakpoint before and after # First find block directives which must exist on there own PS1 line break_linenos = [] ps1_to_directive = {} for s1, s2 in zip(ps1_linenos, ps1_linenos[1:] + [None]): lines = exec_source_lines[s1:s2] directives = list(directive.Directive.extract('\n'.join(lines))) if directives: ps1_to_directive[s1] = directives break_linenos.append(s1) if directives[0].inline: if s2 is not None: break_linenos.append(s2) if global_state.DEBUG_PARSER > 3: print(f'break_linenos={break_linenos}') def slice_example(s1, s2, want_lines=None): exec_lines = exec_source_lines[s1:s2] orig_lines = source_lines[s1:s2] directives = ps1_to_directive.get(s1, None) example = doctest_part.DoctestPart(exec_lines, want_lines=want_lines, orig_lines=orig_lines, line_offset=lineno + s1, directives=directives) return example s1 = 0 s2 = 0 if self.simulate_repl: # Break down first parts which dont have any want for s1, s2 in zip(ps1_linenos, ps1_linenos[1:]): example = slice_example(s1, s2) yield example s1 = s2 else: if break_linenos: break_linenos = sorted(set([0] + break_linenos)) # directives are forcing us to further breakup the parts for s1, s2 in zip(break_linenos, break_linenos[1:]): example = slice_example(s1, s2) yield example s1 = s2 if want_lines and mode_hint in {'eval', 'single'}: # Whenever the evaluation of the final line needs to be tested # against want, that line must be separated into its own part. # We break the last line off so we can eval its value, but keep # previous groupings. s2 = ps1_linenos[-1] if s2 != s1: # make sure the last line is not the only line example = slice_example(s1, s2) yield example s1 = s2 s2 = None example = slice_example(s1, s2, want_lines) # if mode_hint is False: # mode_hint = 'exec' # if mode_hint is True: # mode_hint = 'eval' if not bool(want_lines): example.compile_mode = 'exec' else: assert mode_hint in {'eval', 'exec', 'single'} example.compile_mode = mode_hint if global_state.DEBUG_PARSER > 1: print('example.compile_mode = {!r}'.format(example.compile_mode)) print('') yield example def _group_labeled_lines(self, labeled_lines): """ Group labeled lines into logical parts to be executed together Returns: List[List[str] | Tuple[List[str], str]]: A list of parts. Text parts are just returned as a list of lines. Executable parts are returned as a tuple of source lines and an optional "want" statement. """ if global_state.DEBUG_PARSER > 1: print('') # Now that lines have types, groups them. This could have done this # above, but functionality is split for readability. prev_source = None grouped_lines = [] # WORKON_BACKWARDS_COMPAT_CONTINUE_EVAL # Break up explicit continuations for backwards compat groups = [] current = [] state = None if global_state.DEBUG_PARSER > 4: print('labeled_lines = {!r}'.format(labeled_lines)) # Need to ensure that old-style continuations with want statements are # placed in their own group, so they can be executed as "single". for left, mid, right in _iterthree(labeled_lines, pad_value=(None, None)): if left[0] != mid[0] or (mid[0] == 'dsrc' and right[0] == 'dcnt'): if not (left[0] == 'dsrc' and mid[0] == 'dcnt'): # Start a new group if state is not None: groups.append((state, current)) state = mid[0] current = [] current.append(mid) if current: groups.append((state, current)) if global_state.DEBUG_PARSER > 4: print('groups = {!r}'.format(groups)) # need to merge consecutive dsrc groups without want statements merged_groups = [] current = [] state = None for left, mid, right in _iterthree(groups, pad_value=(None, None)): # Merge consecutive groups unless it is followed by a want if left[0] == mid[0] and right[0] != 'want': # extend the previous group current.extend(mid[1]) else: # start a new group if state is not None: merged_groups.append((left[0], current)) state = mid[0] current = [] current.extend(mid[1]) if current: merged_groups.append((state, current)) # More iterating and grouping. This section needs a careful rewrite prev_source = None grouped_lines = [] for state, group in merged_groups: block = [t[1] for t in group] if state == 'text': if prev_source is not None: # accept a source block without a want block grouped_lines.append((prev_source, '')) prev_source = None # accept the text grouped_lines.append(block) elif state == 'want': assert prev_source is not None, 'impossible' grouped_lines.append((prev_source, block)) prev_source = None elif state in {'dsrc', 'dcnt'}: if prev_source is not None: # accept a source block without a want block grouped_lines.append((prev_source, '')) prev_source = None # need to check if there is a want after us prev_source = block # Case where last block is source if prev_source: grouped_lines.append((prev_source, '')) if global_state.DEBUG_PARSER > 1: # nocover print('') return grouped_lines def _locate_ps1_linenos(self, source_lines): """ Determines which lines in the source begin a "logical block" of code. Args: source_lines (List[str]): lines belonging only to the doctest src these will be unindented, prefixed, and without any want. Returns: Tuple[List[int], bool]: linenos is the first value a list of indices indicating which lines are considered "PS1" and mode_hint, the second value, is a flag indicating if the final line should be considered for a got/want assertion. Example: >>> self = DoctestParser() >>> source_lines = ['>>> def foo():', '>>> return 0', '>>> 3'] >>> linenos, mode_hint = self._locate_ps1_linenos(source_lines) >>> assert linenos == [0, 2] >>> assert mode_hint == 'eval' Example: >>> from xdoctest.parser import * # NOQA >>> self = DoctestParser() >>> source_lines = ['>>> x = [1, 2, ', '>>> 3, 4]', '>>> print(len(x))'] >>> linenos, mode_hint = self._locate_ps1_linenos(source_lines) >>> assert linenos == [0, 2] >>> assert mode_hint == 'eval' Example: >>> from xdoctest.parser import * # NOQA >>> self = DoctestParser() >>> source_lines = [ >>> '>>> x = 1', >>> '>>> try: raise Exception', >>> '>>> except Exception: pass', >>> '...', >>> ] >>> linenos, mode_hint = self._locate_ps1_linenos(source_lines) >>> assert linenos == [0, 1] >>> assert mode_hint == 'exec' Example: >>> from xdoctest.parser import * # NOQA >>> self = DoctestParser() >>> source_lines = [ >>> '>>> import os; print(os)', >>> '...', >>> ] >>> linenos, mode_hint = self._locate_ps1_linenos(source_lines) >>> assert linenos == [0] >>> assert mode_hint == 'single' Example: >>> # We should ensure that decorators are PS1 lines >>> from xdoctest.parser import * # NOQA >>> self = DoctestParser() >>> source_lines = [ >>> '>>> # foo', >>> '>>> @foo', >>> '... def bar():', >>> '... ...', >>> ] >>> linenos, mode_hint = self._locate_ps1_linenos(source_lines) >>> print(f'linenos={linenos}') >>> assert linenos == [0, 1] """ # Strip indentation (and PS1 / PS2 from source) exec_source_lines = [p[4:] for p in source_lines] def _hack_comment_statements(lines): # Hack to make comments appear like executable statements # note, this hack never leaves this function because we only are # returning line numbers. # FIXME: there is probably a better way to do this. def balanced_intervals(lines): """ Finds intervals of balanced nesting syntax Args: lines (List[str]): lines of source code """ intervals = [] a = len(lines) - 1 b = len(lines) while b > 0: # move the head pointer up until we become balanced while not static.is_balanced_statement(lines[a:b], only_tokens=True) and a >= 0: a -= 1 if a < 0: raise exceptions.IncompleteParseError( 'ill-formed doctest: cannot find balanced ps1 lines.') # we found a balanced interval intervals.append((a, b)) b = a a = a - 1 intervals = intervals[::-1] return intervals intervals = balanced_intervals(lines) interval_starts = {t[0] for t in intervals} for i, line in enumerate(lines): if i in interval_starts and line.startswith('#'): # Replace any comment that is not within an interval with a # statement, so ast.parse will record its line number yield '_._ = None' else: yield line exec_source_lines = list(_hack_comment_statements(exec_source_lines)) source_block = '\n'.join(exec_source_lines) try: pt = static.six_axt_parse(source_block) except SyntaxError as syn_ex: # Assign missing information to the syntax error. if syn_ex.text is None: if syn_ex.lineno is not None: # Grab the line where the error occurs # (why is this not populated in SyntaxError by default?) # (because filename does not point to a valid loc) line = source_block.split('\n')[syn_ex.lineno - 1] syn_ex.text = line + '\n' raise syn_ex # print(ast.dump(pt)) # print('pt = {!r}'.format(pt)) statement_nodes = pt.body ps1_linenos = [node.lineno - 1 for node in statement_nodes] if 1: # Get PS1 line numbers of statements accounting for decorators ps1_linenos = [] for node in statement_nodes: if hasattr(node, 'decorator_list') and node.decorator_list: lineno = node.decorator_list[0].lineno - 1 else: lineno = node.lineno - 1 ps1_linenos.append(lineno) # Respect any line explicitly defined as PS2 (via its prefix) ps2_linenos = { x for x, p in enumerate(source_lines) if p[:4] != '>>> ' } ps1_linenos = sorted(set(ps1_linenos).difference(ps2_linenos)) # There are 3 ways to compile python code # exec, eval, and single. # We almost always want to exec, but if we want to match the return # value of the function, we will need to run it in eval or single mode. mode_hint = 'exec' if len(statement_nodes) == 0: mode_hint = 'exec' else: # Is the last statement evaluate-able? if isinstance(statement_nodes[-1], ast.Expr): # This should just be an Expr in python3 # (todo: ensure this is true) mode_hint = 'eval' # WORKON_BACKWARDS_COMPAT_CONTINUE_EVAL: # Force doctests parts to evaluate in backwards compatible "single" # mode when using old style doctest syntax. if len(source_lines) > 1: if source_lines[0].startswith('>>> '): if all(_hasprefix(s, ('...',)) for s in source_lines[1:]): mode_hint = 'single' if mode_hint == 'eval': # Also check the tokens in the source lines to look for semicolons # to fix #108 # Only iterate through non-empty lines otherwise tokenize will stop short # TODO: we probably could just save the tokens if we got them earlier? iterable = (line for line in exec_source_lines if line) def _readline(): return next(iterable) # We cannot eval a statement with a semicolon in it # Single should work. if any(t.type == tokenize.OP and t.string == ';' for t in tokenize.generate_tokens(_readline)): mode_hint = 'single' return ps1_linenos, mode_hint def _label_docsrc_lines(self, string): """ Give each line in the docstring a label so we can distinguish what parts are text, what parts are code, and what parts are "want" string. Args: string (str): doctest source Returns: List[Tuple[str, str]]: labeled_lines - the above source broken up by lines, each with a label indicating its type for later use in parsing. TODO: - [ ] Sphinx does not parse this doctest properly Example: >>> from xdoctest.parser import * >>> # Having multiline strings in doctests can be nice >>> string = utils.codeblock( ''' text >>> items = ['also', 'nice', 'to', 'not', 'worry', >>> 'about', '...', 'vs', '>>>'] ... print('but its still allowed') but its still allowed more text ''') >>> self = DoctestParser() >>> labeled = self._label_docsrc_lines(string) >>> expected = [ >>> ('text', 'text'), >>> ('dsrc', ">>> items = ['also', 'nice', 'to', 'not', 'worry',"), >>> ('dsrc', ">>> 'about', '...', 'vs', '>>>']"), >>> ('dcnt', "... print('but its still allowed')"), >>> ('want', 'but its still allowed'), >>> ('text', ''), >>> ('text', 'more text') >>> ] >>> assert labeled == expected """ # parse and differentiate between doctest source and want statements. labeled_lines = [] state_indent = 0 # line states TEXT = 'text' DSRC = 'dsrc' DCNT = 'dcnt' # explicit continuation **new in 0.10.0** WANT = 'want' # Move through states, keeping track of points where states change # text -> [text, dsrc] # dsrc -> [dsrc, dcnt, want, text] # dcnt -> [dsrc, dcnt, want, text] # want -> [want, text, dsrc] prev_state = TEXT curr_state = None line_iter = enumerate(string.splitlines()) for line_idx, line in line_iter: match = INDENT_RE.search(line) line_indent = 0 if match is None else (match.end() - match.start()) if global_state.DEBUG_PARSER: # nocover print('Next line {}: {}'.format(line_idx, line)) print('state_indent = {!r}'.format(state_indent)) print('match = {!r}'.format(match)) print('line_indent = {!r}'.format(line_indent)) norm_line = line[state_indent:] # Normalize line indentation strip_line = line.strip() # Check prev_state transitions if prev_state == TEXT: # text transitions to source whenever a PS1 line is encountered # the PS1(>>>) can be at an arbitrary indentation if _hasprefix(strip_line, ('>>>',)): curr_state = DSRC else: curr_state = TEXT elif prev_state == WANT: # blank lines terminate wants if len(strip_line) == 0: curr_state = TEXT # source-inconsistent indentation terminates want elif _hasprefix(line.strip(), ('>>>',)): curr_state = DSRC elif line_indent < state_indent: curr_state = TEXT else: curr_state = WANT elif prev_state in {DSRC, DCNT}: # pragma: nobranch if len(strip_line) == 0 or line_indent < state_indent: curr_state = TEXT # allow source to continue with either PS1 or PS2 elif _hasprefix(norm_line, ('>>>', '...')): if strip_line == '...': # TODO: add mechanism for checking next line. # if the next line is also a continuation # then dont treat this as an ellipses if prev_state == DCNT: # Hack to fix continuation issue curr_state = DCNT else: curr_state = WANT else: if _hasprefix(norm_line, ('...',)): curr_state = DCNT else: curr_state = DSRC else: curr_state = WANT else: # nocover # This should never happen raise AssertionError('Unknown state prev_state={}'.format( prev_state)) # Handle transitions if prev_state != curr_state: # Handle start of new states if curr_state == TEXT: state_indent = 0 if curr_state in {DSRC, DCNT}: # Start a new source state_indent = line_indent # renormalize line when indentation changes norm_line = line[state_indent:] # continue current state if curr_state in {DSRC, DCNT}: # source parts may consume more than one line try: if global_state.DEBUG_PARSER: # nocover print('completing source') for part, norm_line in _complete_source(line, state_indent, line_iter): if global_state.DEBUG_PARSER > 4: # nocover print('Append Completion Line:') print('part = {!r}'.format(part)) print('norm_line = {!r}'.format(norm_line)) print('curr_state = {!r}'.format(curr_state)) if _hasprefix(norm_line, ('...',)): curr_state = DCNT labeled_lines.append((curr_state, part)) except exceptions.IncompleteParseError: raise except SyntaxError: if global_state.DEBUG_PARSER: # nocover print('') raise elif curr_state == WANT: labeled_lines.append((curr_state, line)) elif curr_state == TEXT: labeled_lines.append((curr_state, line)) prev_state = curr_state if global_state.DEBUG_PARSER > 1: # nocover import ubelt as ub # if global_state.DEBUG_PARSER > 3: # print('string = {!r}'.format(string)) print('') return labeled_lines def _min_indentation(s): "Return the minimum indentation of any non-blank line in `s`" indents = [len(indent) for indent in INDENT_RE.findall(s)] if len(indents) > 0: return min(indents) else: return 0 def _complete_source(line, state_indent, line_iter): """ helper remove lines from the iterator if they are needed to complete source This uses :func:`static.is_balanced_statement` to do the heavy lifting Example: >>> from xdoctest.parser import * # NOQA >>> from xdoctest.parser import _complete_source >>> state_indent = 0 >>> line = '>>> x = { # The line is not finished' >>> remain_lines = ['>>> 1:2,', '>>> 3:4,', '>>> 5:6}', '>>> y = 7'] >>> line_iter = enumerate(remain_lines, start=1) >>> finished = list(_complete_source(line, state_indent, line_iter)) >>> final = chr(10).join([t[1] for t in finished]) >>> print(final) """ norm_line = line[state_indent:] # Normalize line indentation prefix = norm_line[:4] suffix = norm_line[4:] assert prefix.strip() in {'>>>', '...'}, ( 'unexpected prefix: {!r}'.format(prefix)) yield line, norm_line source_parts = [suffix] # These hacks actually modify the input doctest slightly HACK_TRIPLE_QUOTE_FIX = True try: while not static.is_balanced_statement(source_parts, only_tokens=True): line_idx, next_line = next(line_iter) norm_line = next_line[state_indent:] prefix = norm_line[:4] suffix = norm_line[4:] if prefix.strip() not in {'>>>', '...', ''}: # nocover error = True if HACK_TRIPLE_QUOTE_FIX: # TODO: make a more robust patch if any("'''" in s or '"""' in s for s in source_parts): # print('HACK FIXING TRIPLE QUOTE') next_line = next_line[:state_indent] + '... ' + norm_line norm_line = '... ' + norm_line prefix = '' suffix = norm_line error = False if error: if global_state.DEBUG_PARSER: print(' * !!!ERROR!!!') print(' * source_parts = {!r}'.format(source_parts)) print(' * prefix = {!r}'.format(prefix)) print(' * norm_line = {!r}'.format(norm_line)) print(' * !!!!!!!!!!!!!') raise SyntaxError( 'Bad indentation in doctest on line {}: {!r}'.format( line_idx, next_line)) source_parts.append(suffix) yield next_line, norm_line except StopIteration: if global_state.DEBUG_PARSER: import ubelt as ub print('') import traceback tb_text = traceback.format_exc() tb_text = ub.highlight_code(tb_text) tb_text = ub.indent(tb_text) print(tb_text) # print(' * line_iter = {!r}'.format(line_iter)) print(' * state_indent = {!r}'.format(state_indent)) print(' * line = {!r}'.format(line)) # print('source =\n{}'.format('\n'.join(source_parts))) print('# Ensure that the following line should actually fail') print('source_parts = {}'.format(ub.repr2(source_parts, nl=2))) print(ub.codeblock( r''' from xdoctest import static_analysis as static static.is_balanced_statement(source_parts, only_tokens=False) static.is_balanced_statement(source_parts, only_tokens=True) text = '\n'.join(source_parts) print(text) static.six_axt_parse(text) ''')) print('') # sys.exit(1) # TODO: use AST to reparse all doctest parts to discover where the # syntax error in the doctest is and then raise it. raise exceptions.IncompleteParseError( 'ill-formed doctest: all parts have been processed ' 'but the doctest source is not balanced') else: if global_state.DEBUG_PARSER > 1: import ubelt as ub print('') # print(' * line_iter = {!r}'.format(line_iter)) print('source_parts = {}'.format(ub.repr2(source_parts, nl=2))) print('') def _iterthree(items, pad_value=None): """ Iterate over a sliding window of size 3 with None padding on both sides. Example: >>> from xdoctest.parser import * >>> print(list(_iterthree([]))) >>> print(list(_iterthree(range(1)))) >>> print(list(_iterthree([1, 2]))) >>> print(list(_iterthree([1, 2, 3]))) >>> print(list(_iterthree(range(4)))) >>> print(list(_iterthree(range(7)))) """ # Initialize the return window to pad values left = mid = right = pad_value # Create an iterator item_iter = iter(items) # Check the first item, if we dont have it, then dont return anything try: mid = next(item_iter) except StopIteration: return else: # Check the second item, if we dont have it, we have to return # the values we've seen so far. try: right = next(item_iter) except StopIteration: yield left, mid, right return else: # If we have both mid and right, then yield both yield left, mid, right left, mid = mid, right # If there is still data for right in item_iter: yield left, mid, right left, mid = mid, right right = pad_value yield left, mid, right def _hasprefix(line, prefixes): """ helper prefix test """ # if not isinstance(prefixes, tuple): # prefixes = [prefixes] return any(line == p or line.startswith(p + ' ') for p in prefixes) if __name__ == '__main__': """ CommandLine: python -m xdoctest.core python -m xdoctest.parser all """ import xdoctest as xdoc xdoc.doctest_module() Erotemic-xdoctest-fac8308/src/xdoctest/parser.pyi000066400000000000000000000006741505122333300222010ustar00rootroot00000000000000from typing import List import xdoctest import xdoctest.doctest_part from _typeshed import Incomplete INDENT_RE: Incomplete NEED_16806_WORKAROUND: Incomplete class DoctestParser: simulate_repl: bool def __init__(self, simulate_repl: bool = False) -> None: ... def parse( self, string: str, info: dict | None = None ) -> List[xdoctest.doctest_part.DoctestPart | str]: ... Erotemic-xdoctest-fac8308/src/xdoctest/plugin.py000066400000000000000000000324621505122333300220320ustar00rootroot00000000000000""" The Pytest XDoctest Plugin -------------------------- This file is registered as a pytest plugin when you install xdoctest. By executing pytest with ``--xdoctest-modules`` (or simply ``--xdoctest``), this plugin will be enabled. This also disables the original builtin doctest plugin. When xdoctest is enabled, pytest will discover and run doctests in modules and test files using xdoctest's improved parser and runtime environment. To ensure maximum backwards compatibility with the original doctest module, this code is heavilly based on ``pytest/_pytest/doctest.py`` plugin file in https://github.com/pytest-dev/pytest """ import pytest from _pytest._code import code from _pytest import fixtures try: from packaging.version import parse as Version except ImportError: # nocover from distutils.version import LooseVersion as Version _PYTEST_IS_GE_620 = Version(pytest.__version__) >= Version('6.2.0') _PYTEST_IS_GE_800 = Version(pytest.__version__) >= Version('8.0.0') if _PYTEST_IS_GE_800: from typing import Dict from _pytest.fixtures import TopRequest # Ban-list: extend if other plugins are known to break `xdoctest` _INCOMPATIBLE_PLUGINS = frozenset({'doctest'}) # def print(text): # """ Hack so we can get stdout when debugging the plugin file """ # import os # fpath = os.path.expanduser('~/plugin.stdout.txt') # with open(fpath, 'a') as file: # file.write(str(text) + '\n') __docstubs__ = """ import xdoctest.doctest_example """ def pytest_configure(config): manager = config.pluginmanager all_plugins = {manager.get_name(plugin): plugin for plugin in manager.get_plugins()} # If we're using `xdoctest`, unregister plugins on the ban-list if getattr(config.option, 'xdoctestmodules', False): for incompatible in _INCOMPATIBLE_PLUGINS.intersection(all_plugins): manager.unregister(all_plugins[incompatible]) def pytest_addoption(parser): # TODO: make this programmatically mirror the argparse in __main__ from xdoctest import core def str_lower(x): # python2 fix return str.lower(str(x)) group = parser.getgroup('collect') parser.addini('xdoctest_encoding', 'encoding used for xdoctest files', default='utf-8') # parser.addini('xdoctest_options', 'default directive flags for doctests', # type="args", default=["+ELLIPSIS"]) group.addoption('--xdoctest-modules', '--xdoctest', '--xdoc', action='store_true', default=False, help='Run doctests in all .py modules using new style parsing', dest='xdoctestmodules') group.addoption('--xdoctest-glob', '--xdoc-glob', action='append', default=[], metavar='pat', help=( 'Text files matching this pattern will be checked ' 'for doctests. This option may be specified multiple ' 'times. XDoctest does not check any text files by ' 'default. For compatibility with doctest set this to ' 'test*.txt'), dest='xdoctestglob') group.addoption('--xdoctest-ignore-syntax-errors', action='store_true', default=False, help='Ignore xdoctest SyntaxErrors', dest='xdoctest_ignore_syntax_errors') group.addoption('--xdoctest-style', '--xdoc-style', type=str_lower, default='freeform', help='Basic style used to write doctests', choices=core.DOCTEST_STYLES, dest='xdoctest_style') group.addoption('--xdoctest-analysis', '--xdoc-analysis', type=str_lower, default='auto', help=('How doctests are collected. ' 'Can either be static, dynamic, or auto'), choices=['static', 'dynamic', 'auto'], dest='xdoctest_analysis') from xdoctest import doctest_example doctest_example.DoctestConfig()._update_argparse_cli( group.addoption, prefix=['xdoctest', 'xdoc'], defaults=dict(verbose=2) ) if pytest.__version__ < '7.': # nocover def pytest_collect_file(path, parent): return _pytest_collect_file(path, parent, fspath=path) def _suffix(path): return path.ext def _match(path, glob): return path.check(fnmatch=glob) else: def pytest_collect_file(file_path, parent): # type: ignore return _pytest_collect_file(file_path, parent, path=file_path) def _suffix(path): return path.suffix def _match(path, glob): return path.match(glob) def _pytest_collect_file(file_path, parent, **path_args): config = parent.config if _suffix(file_path) == ".py": if config.option.xdoctestmodules: if hasattr(XDoctestModule, 'from_parent'): return XDoctestModule.from_parent(parent, **path_args) else: return XDoctestModule(file_path, parent) elif _is_xdoctest(config, file_path, parent): if hasattr(XDoctestTextfile, 'from_parent'): return XDoctestTextfile.from_parent(parent, **path_args) else: return XDoctestTextfile(file_path, parent) def _is_xdoctest(config, path, parent): matched = False if _suffix(path) in ('.txt', '.rst') and parent.session.isinitpath(path): matched = True else: globs = config.getoption("xdoctestglob") for glob in globs: if _match(path, glob): matched = True break return matched class ReprFailXDoctest(code.TerminalRepr): def __init__(self, reprlocation, lines): """ Args: reprlocation (Any): _pytest._code.code.ReprFileLocation where the error happened lines (List[str]): text of the error """ self.reprlocation = reprlocation self.lines = lines def toterminal(self, tw): for line in self.lines: tw.line(line) self.reprlocation.toterminal(tw) class XDoctestItem(pytest.Item): def __init__(self, name, parent, runner=None, dtest=None): """ Args: name (str): parent (Any | None): dtest (xdoctest.doctest_example.DocTest): """ super(XDoctestItem, self).__init__(name, parent) self.cls = XDoctestItem self.dtest = dtest self.obj = None if _PYTEST_IS_GE_800: # Stuff needed for fixture support in pytest > 8.0. fm = self.session._fixturemanager fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None) self._fixtureinfo = fixtureinfo self.fixturenames = fixtureinfo.names_closure self._initrequest() else: self.fixture_request = None if _PYTEST_IS_GE_800: @classmethod def from_parent( # type: ignore cls, parent, name, runner=None, dtest=None, ): # incompatible signature due to imposed limits on subclass """The public named constructor.""" return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) @property def example(self): """ Backwards compatibility with older pytest versions """ return self.dtest def _initrequest(self) -> None: assert _PYTEST_IS_GE_800 self.funcargs: Dict[str, object] = {} self._request = TopRequest(self, _ispytest=True) # type: ignore[arg-type] def setup(self): if _PYTEST_IS_GE_800: self._request._fillfixtures() globs = dict(getfixture=self._request.getfixturevalue) for name, value in self._request.getfixturevalue("xdoctest_namespace").items(): globs[name] = value self.dtest.globs.update(globs) else: if self.dtest is not None: self.fixture_request = _setup_fixtures(self) global_namespace = dict(getfixture=self.fixture_request.getfixturevalue) for name, value in self.fixture_request.getfixturevalue('xdoctest_namespace').items(): global_namespace[name] = value self.dtest.global_namespace.update(global_namespace) def runtest(self): if self.dtest.is_disabled(pytest=True): pytest.skip('doctest encountered global skip directive') # verbose = self.dtest.config['verbose'] self.dtest.run(on_error='raise') if not self.dtest.anything_ran(): pytest.skip('doctest is empty or all parts were skipped') def repr_failure(self, excinfo): """ # Args: # excinfo (_pytest._code.code.ExceptionInfo): # Returns: # ReprFailXDoctest | str | _pytest._code.code.TerminalRepr: """ dtest = self.dtest if dtest.exc_info is not None: lineno = dtest.failed_lineno() type = dtest.exc_info[0] message = type.__name__ reprlocation = code.ReprFileLocation(dtest.fpath, lineno, message) lines = dtest.repr_failure() return ReprFailXDoctest(reprlocation, lines) else: return super(XDoctestItem, self).repr_failure(excinfo) def reportinfo(self): """ Returns: Tuple[str, int, str] """ return self.fspath, self.dtest.lineno, "[xdoctest] %s" % self.name class _XDoctestBase(pytest.Module): def _prepare_internal_config(self): class NamespaceLike: def __init__(self, config): self.config = config def __getitem__(self, attr): return self.config.getvalue('xdoctest_' + attr) def __getattr__(self, attr): return self.config.getvalue('xdoctest_' + attr) ns = NamespaceLike(self.config) from xdoctest import doctest_example self._examp_conf = doctest_example.DoctestConfig()._populate_from_cli(ns) class XDoctestTextfile(_XDoctestBase): obj = None def collect(self): """ Yields: XDoctestItem """ from xdoctest import core encoding = self.config.getini("xdoctest_encoding") text = self.fspath.read_text(encoding) filename = str(self.fspath) name = self.fspath.basename global_namespace = {'__name__': '__main__'} self._prepare_internal_config() style = self.config.getvalue('xdoctest_style') _example_iter = core.parse_docstr_examples( text, name, fpath=filename, style=style) for dtest in _example_iter: dtest.global_namespace.update(global_namespace) dtest.config.update(self._examp_conf) if hasattr(XDoctestItem, 'from_parent'): yield XDoctestItem.from_parent( self, name=name, dtest=dtest) else: # direct construction is deprecated yield XDoctestItem(name, self, dtest=dtest) class XDoctestModule(_XDoctestBase): def collect(self): from xdoctest import core modpath = str(self.fspath) style = self.config.getvalue('xdoctest_style') analysis = self.config.getvalue('xdoctest_analysis') self._prepare_internal_config() try: examples = list(core.parse_doctestables(modpath, style=style, analysis=analysis)) except SyntaxError: if self.config.getvalue('xdoctest_ignore_syntax_errors'): pytest.skip('unable to import module %r' % self.fspath) else: raise for dtest in examples: dtest.config.update(self._examp_conf) name = dtest.unique_callname if hasattr(XDoctestItem, 'from_parent'): yield XDoctestItem.from_parent( self, name=name, dtest=dtest) else: # direct construction is deprecated yield XDoctestItem(name, self, dtest=dtest) def _setup_fixtures(xdoctest_item): """ Used by XDoctestTextfile and XDoctestItem to setup fixture information. Args: xdoctest_item (XDoctestItem): Returns: fixtures.FixtureRequest """ def func(): pass xdoctest_item.funcargs = {} fm = xdoctest_item.session._fixturemanager xdoctest_item._fixtureinfo = fm.getfixtureinfo( node=xdoctest_item, func=func, cls=None, funcargs=False) # Note: FixtureRequest may change in the future, we are using # private functionality. Hopefully it wont break, but we should # check to see if there is a better way to do this # https://github.com/pytest-dev/pytest/discussions/8512#discussioncomment-563347 if _PYTEST_IS_GE_620: # The "_ispytest" arg was added in 3.6.1 fixture_request = fixtures.FixtureRequest(xdoctest_item, _ispytest=True) else: fixture_request = fixtures.FixtureRequest(xdoctest_item) fixture_request._fillfixtures() return fixture_request @pytest.fixture(scope='session') def xdoctest_namespace(): """ Inject names into the xdoctest namespace. Returns: Dict """ return dict() Erotemic-xdoctest-fac8308/src/xdoctest/plugin.pyi000066400000000000000000000030051505122333300221720ustar00rootroot00000000000000from typing import Any from typing import List import xdoctest from typing import Tuple from typing import Dict import pytest from _pytest._code import code from _typeshed import Incomplete from collections.abc import Generator import xdoctest.doctest_example __docstubs__: str def pytest_configure(config: pytest.Config) -> Generator[None, None, None]: ... def pytest_addoption(parser): ... def pytest_collect_file(path, parent): ... class ReprFailXDoctest(code.TerminalRepr): reprlocation: Any lines: List[str] def __init__(self, reprlocation: Any, lines: List[str]) -> None: ... def toterminal(self, tw) -> None: ... class XDoctestItem(pytest.Item): cls: Incomplete example: xdoctest.doctest_example.DocTest obj: Incomplete fixture_request: Incomplete def __init__( self, name: str, parent: Any | None, example: xdoctest.doctest_example.DocTest | None = None) -> None: ... def setup(self) -> None: ... def runtest(self) -> None: ... def repr_failure(self, excinfo): ... def reportinfo(self) -> Tuple[str, int, str]: ... class _XDoctestBase(pytest.Module): ... class XDoctestTextfile(_XDoctestBase): obj: Incomplete def collect(self) -> Generator[XDoctestItem, None, None]: ... class XDoctestModule(_XDoctestBase): def collect(self) -> Generator[Any, None, None]: ... def xdoctest_namespace() -> Dict: ... Erotemic-xdoctest-fac8308/src/xdoctest/py.typed000066400000000000000000000000001505122333300216400ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/src/xdoctest/runner.py000066400000000000000000000731041505122333300220430ustar00rootroot00000000000000""" The Native XDoctest Runner -------------------------- This file defines the native xdoctest interface to the collecting, executing, and summarizing the results of running doctests. This is an alternative to running through pytest. Using the XDoctest Runner via the Terminal ------------------------------------------ This interface is exposed via the ``xdoctest.__main__`` script and can be invoked on any module via: ``python -m xdoctest ``, where ```` is the path to. For example to run the tests in this module you could execute: .. code:: bash python -m xdoctest xdoctest.runner all For more details see: .. code:: bash python -m xdoctest --help Using the XDoctest Runner Programmatically ------------------------------------------ This interface may also be run programmatically using ``xdoctest.doctest_module(path)``, which can be placed in the ``__main__`` section of any module as such: .. code:: python if __name__ == '__main__': import xdoctest as xdoc xdoc.doctest_module(__file__) This allows you to invoke the runner on a specific module by simply running that module as the main module. Via: ``python -m ``. For example, the this module ends with the previous code, which means you can run the doctests as such: .. code:: bash python -m xdoctest.runner list """ from xdoctest import dynamic_analysis from xdoctest import core from xdoctest import doctest_example from xdoctest import utils from functools import partial import time import types import warnings import sys from xdoctest import global_state def log(msg, verbose, level=1): """ Simple conditional print logger Args: msg (str): message to print verbose (int): verbosity level, higher means more is print level (int): verbosity level at which this is printed. 0 is always, 1 is info, 2 is verbose, 3 is very-verbose. """ if verbose >= level: print(msg) def doctest_callable(func): """ Executes doctests an in-memory function or class. Args: func (callable): live method or class for which we will run its doctests. Example: >>> def inception(text): >>> ''' >>> Example: >>> >>> inception("I heard you liked doctests") >>> ''' >>> print(text) >>> func = inception >>> doctest_callable(func) """ from xdoctest.core import parse_docstr_examples doctests = list(parse_docstr_examples( func.__doc__, callname=func.__name__)) # TODO: can this be hooked up into runner to get nice summaries? for doctest in doctests: # FIXME: each doctest needs a way of getting the globals of the scope # that the parent function was defined in. # HACK: to add module context, this might not be robust. doctest.module = sys.modules[func.__module__] doctest.global_namespace[func.__name__] = func doctest.run(verbose=3) def gather_doctests(doctest_identifiers, style='auto', analysis='auto', verbose=None): raise NotImplementedError('todo') def doctest_module(module_identifier=None, command=None, argv=None, exclude=[], style='auto', verbose=None, config=None, durations=None, analysis='auto'): """ Executes requestsed google-style doctests in a package or module. Main entry point into the testing framework. Args: module_identifier (str | ModuleType | None): The name of / path to the module, or the live module itself. If not specified, dynamic analysis will be used to introspect the module that called this function and that module will be used. This can also contain the callname followed by the ``::`` token. command (str): determines which doctests to run. if command is None, this is determined by parsing sys.argv Value values are 'all' - find and run all tests in a module 'list' - list the tests in a module 'dump' - dumps tests to stdout argv (List[str] | None): if specified, command line flags that might influence beharior. if None uses sys.argv. SeeAlso :func:_update_argparse_cli SeeAlso :func:doctest_example.DoctestConfig._update_argparse_cli style (str): Determines how doctests are recognized and grouped. Can be freeform, google, or auto. verbose (int | None): Verbosity level. 0 - disables all text 1 - minimal printing 3 - verbose printing exclude (List[str]): ignores any modname matching any of these glob-like patterns config (Dict[str, object]): modifies each examples configuration. Defaults and expected keys are documented in :class:`xdoctest.doctest_example.DoctestConfig` durations (int | None): if specified report top N slowest tests analysis (str): determines if doctests are found using static or dynamic analysis. Returns: Dict[str, Any]: run_summary Example: >>> modname = 'xdoctest.dynamic_analysis' >>> result = doctest_module(modname, 'list', argv=['']) Example: >>> # xdoctest: +SKIP >>> # Demonstrate different ways "module_identifier" can be specified >>> # >>> # Using a module name >>> result = doctest_module('xdoctest.static_analysis') >>> # >>> # Using a module path >>> result = doctest_module(os.expandpath('~/code/xdoctest/src/xdoctest/static_analysis.py')) >>> # >>> # Using a module itself >>> from xdoctest import runner >>> result = doctest_module(runner) >>> # >>> # Using a module name and a specific callname >>> from xdoctest import runner >>> result = doctest_module('xdoctest.static_analysis::parse_static_value') """ _debug = partial(log, verbose=global_state.DEBUG_RUNNER) _debug('------+ DEBUG +------') _debug('CALLED doctest_module') _debug('exclude = {!r}'.format(exclude)) _debug('argv = {!r}'.format(argv)) _debug('command = {!r}'.format(command)) _debug('module_identifier = {!r}'.format(module_identifier)) _debug('durations = {!r}'.format(durations)) _debug('config = {!r}'.format(config)) _debug('verbose = {!r}'.format(verbose)) _debug('style = {!r}'.format(style)) _debug('------+ /DEBUG +------') modinfo = { 'modpath': None, 'modname': None, 'module': None, } # TODO: allow a list of doctest specifiers to be passed. # TODO: abstract into a "gather" doctest method. if module_identifier is None: # Determine package name via caller if not specified frame_parent = dynamic_analysis.get_parent_frame() if '__file__' in frame_parent.f_globals: modinfo['modpath'] = frame_parent.f_globals['__file__'] else: # Module might not exist as a path on disk, we might be trying to # test an IPython session. modinfo['modname'] = frame_parent.f_globals['__name__'] modinfo['module'] = sys.modules[modinfo['modname']] else: if isinstance(module_identifier, types.ModuleType): modinfo['module'] = module_identifier modinfo['modpath'] = modinfo['module'].__file__ else: # Allow the modname to contain the name of the test to be run if '::' in module_identifier: if command is None: modpath_or_name, command = module_identifier.split('::') modinfo['modpath'] = core._rectify_to_modpath(modpath_or_name) else: raise ValueError('Command must be None if using :: syntax') else: modinfo['modpath'] = core._rectify_to_modpath(module_identifier) if config is None: config = doctest_example.DoctestConfig() command, style, verbose = _parse_commandline(command, style, verbose, argv) _log = partial(log, verbose=verbose) # Usually the "parseable_identifier" (i.e. the object we will extract the # docstrings from) is a path to a module, but sometimes we will only be # given the live module itself, hence the abstraction. if modinfo['modpath'] is None: parsable_identifier = modinfo['module'] else: parsable_identifier = modinfo['modpath'] _log('Start doctest_module({!r})'.format(parsable_identifier), level=2) _log('Listing tests', level=2) if command is None: # Display help if command is not specified _log('Not testname given. Use `all` to run everything or' ' pick from a list of valid choices:') command = 'list' # TODO: command should not be allowed to be the requested doctest name in # case it conflicts with an existing command. This probably requires an API # change to this function. gather_all = (command == 'all' or command == 'dump') tic = time.time() # Parse all valid examples with warnings.catch_warnings(record=True) as parse_warnlist: examples = list(core.parse_doctestables( parsable_identifier, exclude=exclude, style=style, analysis=analysis)) # Set each example mode to native to signal that we are using the # native xdoctest runner instead of the pytest runner for example in examples: example.mode = 'native' if command == 'list': if len(examples) == 0: _log('... no docstrings with examples found') else: _log(' ' + '\n '.join([example.cmdline # + ' @ ' + str(example.lineno) for example in examples])) run_summary = {'action': 'list'} else: _log('gathering tests', level=2) enabled_examples = [] for example in examples: if gather_all or command in example.valid_testnames: if gather_all and example.is_disabled(): continue enabled_examples.append(example) if len(enabled_examples) == 0: # Check for zero-arg funcs for example in _gather_zero_arg_examples(parsable_identifier): if command in example.valid_testnames: enabled_examples.append(example) elif command in ['zero-all', 'zero', 'zero_all', 'zero-args']: enabled_examples.append(example) if config: for example in enabled_examples: example.config.update(config) if command == 'dump': # format the doctests as normal unit tests _log('dumping tests to stdout', level=2) module_text = _convert_to_test_module(enabled_examples) _log(module_text, level=0) run_summary = {'action': 'dump'} else: # Run the gathered doctest examples RANDOMIZE_ORDER = False if RANDOMIZE_ORDER: # randomize the order in which tests are run import random random.shuffle(enabled_examples) run_summary = _run_examples(enabled_examples, verbose, config, _log=_log) toc = time.time() n_seconds = toc - tic #### TODO: callback and plugin system. # Can probably reuse some other library for this. # Print final summary info in a style similar to pytest if verbose >= 0 and run_summary: _print_summary_report(run_summary, parse_warnlist, n_seconds, enabled_examples, durations, config=config, _log=_log) # Hidden experimental feature import os insert_skip_directive_above_failures = ( os.environ.get('XDOCTEST_INSERT_SKIP_DIRECTIVE_ABOVE_FAILURES') or '--insert-skip-directive-above-failures' in sys.argv ) AFTER_ALL_HOOKS = [] if insert_skip_directive_above_failures: AFTER_ALL_HOOKS.append(_auto_disable_failing_tests_hook) for hook in AFTER_ALL_HOOKS: context = { 'enabled_example': enabled_examples, 'run_summary': run_summary, } hook(context) return run_summary def _auto_disable_failing_tests_hook(context): """ Experimental feature to modify code based on failing tests. This should likely be moved to its own submodule. """ from collections import defaultdict run_summary = context['run_summary'] failing_examples = run_summary['failed'] path_to_failed_linos = defaultdict(list) for example in failing_examples: # We could disable at the point of failure, or at the start of the # test. While I'm tempted to use the failing one, as it makes it more # clear what to fix, that introduces potential incompatibility with # got/want style errors, so let's default to the first line. WHERE_INSERT = 'start-of-doctest' failed_line_number = example.failed_lineno() start_line_number = example.lineno if WHERE_INSERT == 'start-of-doctest': insert_line_number = start_line_number elif WHERE_INSERT == 'failing-location': insert_line_number = failed_line_number path_to_failed_linos[example.fpath].append(insert_line_number) for fpath, skip_linenos in path_to_failed_linos.items(): print('modifying fpath={}'.format(fpath)) with open(fpath, 'r') as file: lines = file.readlines() # Insert the lines in reverse order for lineno in sorted(skip_linenos)[::-1]: line_idx = lineno - 1 failed_line = lines[line_idx] num_indent_chars = len(failed_line) - len(failed_line.lstrip()) indent = failed_line[:num_indent_chars] endl = '\n' # is there a case we use a different line end? new_line = ''.join([indent, '>>> # xdoctest: +SKIP', endl]) # Dont insert the same line twice, its failing for some other reason # Probably a pre-import if failed_line != new_line: lines.insert(line_idx, new_line) with open(fpath, 'w') as file: file.write(''.join(lines)) def _convert_to_test_module(enabled_examples): """ Logic for the "dumps" command. Converts all doctests to unit tests that can exist in a standalone module """ dump_config = { 'remove_import_star': True, } # from xdoctest import static_analysis as static module_lines = [] for example in enabled_examples: # Create a unit-testable function for this example func_name = 'test_' + example.modname.replace('.', '_') + '_' + example.callname.replace('.', '_') body_lines = [] docstr_lines = [ '"""', 'converted from {}'.format(example.node), '"""', ] header_lines = [] if example.config['global_exec'] is not None: # It might also be a reasonable idea to put the global exec at the # top, but this is more accurate to how it actually runs. # hack for newlines, not sure why it is escaping them. global_lines = example.config['global_exec'].split('\\n') header_lines.extend([g + ' # NOQA' for g in global_lines]) for part in example._parts: if dump_config['remove_import_star']: new_exec_lines = [] for line in part.exec_lines: # TODO: this is not robust, need AST magic here # import ubelt as ub # print('line = {}'.format(ub.repr2(line, nl=1))) # stripped = static._strip_hashtag_comments_and_newlines(line) if ' import *' in line: continue new_exec_lines.append(line) part.exec_lines = new_exec_lines body_part = part.format_part(linenos=False, want=False, prefix=False, colored=False, partnos=False) if part.want: want_text = '# doctest want:\n' want_text += utils.indent(part.want, '# ') body_part += '\n' + want_text body_lines.append(body_part) if len(body_lines) == 0: body_lines.append('...') body = '\n'.join(docstr_lines + header_lines + body_lines) try: undefined = sorted(undefined_names(body)) if undefined: # Assume we can find them in the parent module header_lines.append('from {} import {}'.format(example.modname, ', '.join(undefined))) body = '\n'.join(docstr_lines + header_lines + body_lines) except Exception: warnings.warn('Unable to check for undefined names without pyflakes') # if '+SKIP' in body: # continue # if '+REQUIRES' in body: # continue func_text = 'def {}():\n'.format(func_name) + utils.indent(body) module_lines.append(func_text) module_text = '\n\n\n'.join(module_lines) return module_text def undefined_names(sourcecode): """ Parses source code for undefined names Args: sourcecode (str): code to check for unused names Returns: Set[str]: the unused variable names Example: >>> # xdoctest: +REQUIRES(module:pyflakes) >>> print(undefined_names('x = y')) {'y'} """ import pyflakes.api import pyflakes.reporter class CaptureReporter(pyflakes.reporter.Reporter): def __init__(reporter, warningStream, errorStream): reporter.syntax_errors = [] reporter.messages = [] reporter.unexpected = [] def unexpectedError(reporter, filename, msg): reporter.unexpected.append(msg) def syntaxError(reporter, filename, msg, lineno, offset, text): reporter.syntax_errors.append(msg) def flake(reporter, message): reporter.messages.append(message) names = set() reporter = CaptureReporter(None, None) pyflakes.api.check(sourcecode, '_.py', reporter) for msg in reporter.messages: if msg.__class__.__name__.endswith('UndefinedName'): assert len(msg.message_args) == 1 names.add(msg.message_args[0]) return names def _print_summary_report(run_summary, parse_warnlist, n_seconds, enabled_examples, durations, config=None, _log=None): """ Summary report formatting and printing """ def cprint(text, color): if config is not None and config.get('colored', True): _log(utils.color_text(text, color)) else: _log(text) # report errors failed = run_summary.get('failed', []) warned = run_summary.get('warned', []) # report parse-time warnings if parse_warnlist: cprint('\n=== Found {} parse-time warnings ==='.format( len(parse_warnlist)), 'yellow') for warn_idx, warn in enumerate(parse_warnlist, start=1): cprint('--- Parse Warning: {} / {} ---'.format( warn_idx, len(parse_warnlist)), 'yellow') _log(utils.indent( warnings.formatwarning(warn.message, warn.category, warn.filename, warn.lineno))) # report run-time warnings if warned: cprint('\n=== Found {} run-time warnings ==='.format(len(warned)), 'yellow') for warn_idx, example in enumerate(warned, start=1): cprint('--- Runtime Warning: {} / {} ---'.format(warn_idx, len(warned)), 'yellow') _log('example = {!r}'.format(example)) for warn in example.warn_list: _log(utils.indent( warnings.formatwarning(warn.message, warn.category, warn.filename, warn.lineno))) if failed and len(enabled_examples) > 1: # If there is more than one test being run, _log out all the # errors that occurred so they are consolidated in a single place. cprint('\n=== Found {} errors ==='.format(len(failed)), 'red') for fail_idx, example in enumerate(failed, start=1): cprint('--- Error: {} / {} ---'.format(fail_idx, len(failed)), 'red') _log(utils.indent('\n'.join(example.repr_failure()))) # Print command lines to re-run failed tests if failed: cprint('\n=== Failed tests ===', 'red') for example in failed: _log(example.cmdline) # final summary n_passed = run_summary.get('n_passed', 0) n_failed = run_summary.get('n_failed', 0) n_skipped = run_summary.get('n_skipped', 0) n_warnings = len(warned) + len(parse_warnlist) pairs = zip([n_failed, n_passed, n_skipped, n_warnings], ['failed', 'passed', 'skipped', 'warnings']) parts = ['{n} {t}'.format(n=n, t=t) for n, t in pairs if n > 0] _fmtstr = '=== ' + ', '.join(parts) + ' in {n_seconds:.2f} seconds ===' # _fmtstr = '=== ' + ' '.join(parts) + ' in {n_seconds:.2f} seconds ===' summary_line = _fmtstr.format(n_seconds=n_seconds) # color text based on worst type of error if n_failed > 0: cprint(summary_line, 'red') elif n_warnings > 0 or (n_passed == 0 and n_skipped > 0): cprint(summary_line, 'yellow') else: cprint(summary_line, 'green') if durations is not None: times = run_summary.get('times', {}) test_time_tups = sorted(times.items(), key=lambda x: x[1]) if durations > 0: test_time_tups = test_time_tups[-durations:] for example, n_secs in test_time_tups: _log('time: {:0.8f}, test: {}'.format(n_secs, example.cmdline)) def _gather_zero_arg_examples(modpath): """ Find functions in `modpath` args with no args (so we can automatically make a dummy docstring). """ for calldefs, _modpath in core.package_calldefs(modpath): for callname, calldef in calldefs.items(): if calldef.args is not None: # The only existing args should have defaults n_args = len(calldef.args.args) - len(calldef.args.defaults) if n_args == 0: # Create a dummy doctest example for a zero-arg function docsrc = '>>> {}()'.format(callname) example = doctest_example.DocTest(docsrc=docsrc, modpath=_modpath, callname=callname, block_type='zero-arg') example.mode = 'native' yield example def _run_examples(enabled_examples, verbose, config=None, _log=None): """ Internal helper, loops over each example, runs it, returns a summary """ n_total = len(enabled_examples) _log('running %d test(s)' % n_total) summaries = [] failed = [] warned = [] times = {} # It is important to raise immediately within the test to display errors # returned from multiprocessing. Especially in zero-arg mode on_error = 'return' if n_total > 1 else 'raise' on_error = 'return' for example in enabled_examples: try: try: tic = time.time() summary = example.run(verbose=verbose, on_error=on_error) toc = time.time() n_seconds = toc - tic times[example] = n_seconds except Exception: _log('\n'.join(example.repr_failure(with_tb=False))) raise summaries.append(summary) if example.warn_list: warned.append(example) if summary['skipped']: pass # if verbose == 0: # # TODO: should we write anything when verbose=0? # sys.stdout.write('S') # sys.stdout.flush() elif summary['passed']: pass # if verbose == 0: # # TODO: should we write anything when verbose=0? # sys.stdout.write('.') # sys.stdout.flush() else: failed.append(example) # if verbose == 0: # sys.stdout.write('F') # sys.stdout.flush() if on_error == 'raise': # What happens if we don't re-raise here? # If it is necessary, write a message explaining why _log('\n'.join(example.repr_failure())) ex_value = example.exc_info[1] raise ex_value except KeyboardInterrupt: _log('Caught CTRL+c: Stopping tests') break # except Exception: # summary = {'passed': False} # if verbose == 0: # sys.stdout.write('F') # sys.stdout.flush() if verbose == 0: _log('') n_passed = sum(s['passed'] for s in summaries) n_failed = sum(s['failed'] for s in summaries) n_skipped = sum(s['skipped'] for s in summaries) if config is not None and config.get('colored', True): _log(utils.color_text('============', 'white')) else: _log('============') if n_total > 1: # and verbose > 0: _log('Finished doctests') _log('%d / %d passed' % (n_passed, n_total)) run_summary = { 'failed': failed, 'warned': warned, 'action': 'run_examples', 'n_warned': len(warned), 'n_skipped': n_skipped, 'n_passed': n_passed, 'n_failed': n_failed, 'n_total': n_total, 'times': times, } return run_summary def _parse_commandline(command=None, style='auto', verbose=None, argv=None): # Determine command via sys.argv if not specified doctest_example.DoctestConfig() if argv is None: argv = sys.argv[1:] if command is None: if len(argv) >= 1: if argv[0] and not argv[0].startswith('-'): command = argv[0] # Change how docstrs are found # TODO: Undocumented flags. Either document in argparse or remove. if '--freeform' in argv: style = 'freeform' elif '--google' in argv: style = 'google' # Parse verbosity flag if verbose is None: if '--verbose' in argv: verbose = 3 elif '--quiet' in argv: verbose = 0 elif '--silent' in argv: verbose = -1 else: verbose = 3 return command, style, verbose def _update_argparse_cli(add_argument, prefix=None): """ Update the CLI with arguments that control how doctests are collected ando how aggregate results are reported. """ import os add_argument(*('-m', '--modname'), type=str, help='Module name or path. If specified positional modules are ignored', default=None) add_argument(*('-c', '--command'), type=str, help=( 'A doctest name or a command (all|list|dump|). ' 'Defaults to `all` (which runs everything). Using `list` will ' 'collect and print all doctests. ' 'Using `dump` will convert doctests into unit tests. ' 'Anything else will be interpreted as a "callname"'), default=None) add_argument(*('--style',), type=str, help='Choose the style of doctests that will be parsed', choices=['auto', 'google', 'freeform'], default=os.environ.get('XDOCTEST_STYLE', 'auto')) add_argument(*('--analysis',), type=str, help='How doctests are collected', choices=['auto', 'static', 'dynamic'], default=os.environ.get('XDOCTEST_ANALYSIS', 'auto')) add_argument(*('--durations',), type=int, help=('Specify execution times for slowest N tests.' 'N=0 will show times for all tests'), default=None) add_argument(*('--time',), dest='time', action='store_true', help=('Same as if durations=0')) add_argument_kws = [ # (['--style'], dict(dest='style', # type=str, help='choose your style', # choices=['auto', 'google', 'freeform'], default='auto')), # (['--quiet'], dict(type=int, action='store_false', dest='verbose', # help='sets verbosity=0')), ] if prefix is None: prefix = [''] environ_aware = {'style', 'analysis'} for alias, kw in add_argument_kws: # Use environment variables for some defaults argname = alias[0].lstrip('-') if argname in environ_aware: env_argname = 'XDOCTEST_' + argname.replace('-', '_').upper() if 'default' in kw: kw['default'] = os.environ.get(env_argname, kw['default']) alias = [ a.replace('--', '--' + p + '-') if p else a for a in alias for p in prefix ] if prefix[0]: kw['dest'] = prefix[0] + '_' + kw['dest'] add_argument(*alias, **kw) if __name__ == '__main__': r""" CommandLine: python -m xdoctest.runner all """ import xdoctest as xdoc xdoc.doctest_module() Erotemic-xdoctest-fac8308/src/xdoctest/runner.pyi000066400000000000000000000020201505122333300222010ustar00rootroot00000000000000from typing import Callable from types import ModuleType from typing import List from typing import Dict from typing import Any from typing import Set from _typeshed import Incomplete def log(msg: str, verbose: int, level: int = 1) -> None: ... def doctest_callable(func: Callable) -> None: ... def gather_doctests(doctest_identifiers, style: str = ..., analysis: str = ..., verbose: Incomplete | None = ...) -> None: ... def doctest_module(module_identifier: str | ModuleType | None = None, command: str | None = None, argv: List[str] | None = None, exclude: List[str] = ..., style: str = 'auto', verbose: int | None = None, config: Dict[str, object] | None = None, durations: int | None = None, analysis: str = 'auto') -> Dict[str, Any]: ... def undefined_names(sourcecode: str) -> Set[str]: ... Erotemic-xdoctest-fac8308/src/xdoctest/static_analysis.py000066400000000000000000001222141505122333300237210ustar00rootroot00000000000000""" The core logic that allows for xdoctest to parse source statically """ import sys from os.path import exists from os.path import isfile from os.path import join from os.path import splitext import os import ast import re from collections import deque, OrderedDict from xdoctest import utils from xdoctest.utils.util_import import _platform_pylib_exts # NOQA from xdoctest.utils.util_import import ( # NOQA split_modpath, modname_to_modpath, is_modname_importable, modpath_to_modname) import platform PLAT_IMPL = platform.python_implementation() IS_PY_GE_312 = sys.version_info[0:2] >= (3, 12) IS_PY_GE_308 = sys.version_info[0:2] >= (3, 8) # type: bool IS_PY_LT_314 = sys.version_info[0:2] < (3, 14) # type: bool if IS_PY_GE_312: from xdoctest import _tokenize as tokenize else: import tokenize class CallDefNode: """ Attributes: lineno_end (None | int): the line number the docstring ends on (if known) """ def __init__(self, callname, lineno, docstr, doclineno, doclineno_end, args=None): """ Args: callname (str): the name of the item containing the docstring. lineno (int): the line number the item containing the docstring. docstr (str): the docstring itself doclineno (int): the line number (1 based) the docstring begins on doclineno_end (int): the line number (1 based) the docstring ends on args (None | ast.arguments): arguments from static analysis :class:`TopLevelVisitor`. """ self.callname = callname self.lineno = lineno self.docstr = docstr self.doclineno = doclineno self.doclineno_end = doclineno_end self.lineno_end = None self.args = args def __str__(self): """ Returns: str """ return '{}[{}:{}][{}]'.format( self.callname, self.lineno, self.lineno_end, self.doclineno) class TopLevelVisitor(ast.NodeVisitor): """ Parses top-level function names and docstrings For other ``visit_`` values see [MeetTheNodes]_. References: .. [MeetTheNodes] http://greentreesnakes.readthedocs.io/en/latest/nodes.html CommandLine: python -m xdoctest.static_analysis TopLevelVisitor Attributes: calldefs (OrderedDict): source (None | str): sourcelines (None | List[str]): assignments (list): Example: >>> from xdoctest.static_analysis import * # NOQA >>> from xdoctest import utils >>> source = utils.codeblock( ''' def foo(): \"\"\" my docstring \"\"\" def subfunc(): pass def bar(): pass class Spam: def eggs(self): pass @staticmethod def hams(): pass @property def jams(self): return 3 @jams.setter def jams2(self, x): print('ignoring') @jams.deleter def jams(self, x): print('ignoring') ''') >>> self = TopLevelVisitor.parse(source) >>> callnames = set(self.calldefs.keys()) >>> assert callnames == { >>> 'foo', 'bar', 'Spam', 'Spam.eggs', 'Spam.hams', >>> 'Spam.jams'} >>> assert self.calldefs['foo'].docstr.strip() == 'my docstring' >>> assert 'subfunc' not in self.calldefs """ @classmethod def parse(cls, source): """ main entry point executes parsing algorithm and populates self.calldefs Args: source (str): """ self = cls(source) pt = self.syntax_tree() self.visit(pt) lineno_end = source.count('\n') + 2 # one indexing self.process_finished(lineno_end) return self def __init__(self, source=None): """ Args: source (None | str): """ super(TopLevelVisitor, self).__init__() self.calldefs = OrderedDict() self.source = source self.sourcelines = None self._current_classname = None # Keep track of when we leave a top level definition self._finish_queue = deque() # new self.assignments = [] def syntax_tree(self): """ creates the abstract syntax tree Returns: ast.Module: """ self.sourcelines = self.source.splitlines() source_utf8 = self.source.encode('utf8') pt = ast.parse(source_utf8) return pt def process_finished(self, node): """ process (get ending lineno) for everything marked as finished Args: node (ast.AST): """ if self._finish_queue: if isinstance(node, int): lineno_end = node else: lineno_end = getattr(node, 'lineno', None) while self._finish_queue: calldef = self._finish_queue.pop() calldef.lineno_end = lineno_end def visit(self, node): """ Args: node (ast.AST): """ self.process_finished(node) super(TopLevelVisitor, self).visit(node) def visit_FunctionDef(self, node): """ Args: node (ast.FunctionDef): """ if self._current_classname is None: callname = node.name else: callname = self._current_classname + '.' + node.name if node.decorator_list: for decor in node.decorator_list: if isinstance(decor, ast.Name): if decor.id == 'property': # likely a getter property # should we distinguish getters? # callname = callname + '.fget' pass if isinstance(decor, ast.Attribute): # Don't add setters / deleters to the callnames if decor.attr == 'deleter': # callname = callname + '.fdel' return if decor.attr == 'setter': # callname = callname + '.fset' return # TODO: Is this still necessary in modern Python versions? lineno = self._workaround_func_lineno(node) docstr, doclineno, doclineno_end = self._get_docstring(node) calldef = CallDefNode(callname, lineno, docstr, doclineno, doclineno_end, args=node.args) self.calldefs[callname] = calldef self._finish_queue.append(calldef) def visit_ClassDef(self, node): """ Args: node (ast.ClassDef): """ if self._current_classname is None: callname = node.name self._current_classname = callname docstr, doclineno, doclineno_end = self._get_docstring(node) calldef = CallDefNode(callname, node.lineno, docstr, doclineno, doclineno_end) self.calldefs[callname] = calldef self.generic_visit(node) self._current_classname = None self._finish_queue.append(calldef) def visit_Module(self, node): """ Args: node (ast.Module): """ # get the module level docstr docstr, doclineno, doclineno_end = self._get_docstring(node) if docstr: # the module level docstr is not really a calldef, but parse it for # backwards compatibility. callname = '__doc__' calldef = CallDefNode(callname, doclineno, docstr, doclineno, doclineno_end) self.calldefs[callname] = calldef self.generic_visit(node) # self._finish_queue.append(calldef) def visit_Assign(self, node): """ Args: node (ast.Assign): """ # print('VISIT FunctionDef node = %r' % (node,)) # print('VISIT FunctionDef node = %r' % (node.__dict__,)) if self._current_classname is None: for target in node.targets: if hasattr(target, 'id'): self.assignments.append(target.id) # print('node.value = %r' % (node.value,)) # TODO: assign constants to # self.const_lookup self.generic_visit(node) def visit_If(self, node): """ Args: node (ast.If): """ if isinstance(node.test, ast.Compare): # pragma: nobranch try: if IS_PY_GE_312: if all([ isinstance(node.test.ops[0], ast.Eq), node.test.left.id == '__name__', node.test.comparators[0].value == '__main__', ]): # Ignore main block return else: if all([ isinstance(node.test.ops[0], ast.Eq), node.test.left.id == '__name__', node.test.comparators[0].s == '__main__', ]): # Ignore main block return except Exception: # nocover pass self.generic_visit(node) # nocover # def visit_ExceptHandler(self, node): # pass # def visit_TryFinally(self, node): # pass # def visit_TryExcept(self, node): # pass # def visit_Try(self, node): # TODO: parse a node only if it is visible in all cases # pass # # self.generic_visit(node) # nocover # -- helpers --- def _docnode_line_workaround(self, docnode): """ Find the start and ending line numbers of a docstring Args: docnode (ast.AST): Returns: Tuple[int, int] CommandLine: xdoctest -m xdoctest.static_analysis TopLevelVisitor._docnode_line_workaround Example: >>> from xdoctest.static_analysis import * # NOQA >>> sq = chr(39) # single quote >>> dq = chr(34) # double quote >>> source = utils.codeblock( ''' def func0(): {ddd} docstr0 {ddd} def func1(): {ddd} docstr1 {ddd} def func2(): {ddd} docstr2 {ddd} def func3(): {ddd} docstr3 {ddd} # foobar def func5(): {ddd}pathological case {sss} # {ddd} # {sss} # {ddd} # {ddd} def func6(): " single quoted docstr " def func7(): r{ddd} raw line {ddd} ''').format(ddd=dq * 3, sss=sq * 3) >>> self = TopLevelVisitor(source) >>> func_nodes = self.syntax_tree().body >>> print(utils.add_line_numbers(utils.highlight_code(source), start=1)) >>> wants = [ >>> (2, 2), >>> (4, 5), >>> (7, 8), >>> (10, 12), >>> (14, 15), >>> (17, 17), >>> (19, 21), >>> ] >>> for i, func_node in enumerate(func_nodes): >>> docnode = func_node.body[0] >>> got = self._docnode_line_workaround(docnode) >>> want = wants[i] >>> print('got = {!r}'.format(got)) >>> print('want = {!r}'.format(want)) >>> assert got == want """ # lineno points to the last line of a string in CPython < 3.8 if hasattr(docnode, 'end_lineno'): endpos = docnode.end_lineno - 1 else: if PLAT_IMPL == 'PyPy': startpos = docnode.lineno - 1 if IS_PY_GE_312: docstr = utils.ensure_unicode(docnode.value.value) else: docstr = utils.ensure_unicode(docnode.value.s) sourcelines = self.sourcelines start, stop = self._find_docstr_endpos_workaround(docstr, sourcelines, startpos) # Convert 0-based line positions to 1-based line numbers doclineno = start + 1 doclineno_end = stop + 1 return doclineno, doclineno_end else: # Hack for older versions # TODO: fix in pypy endpos = docnode.lineno - 1 if IS_PY_GE_312: docstr = utils.ensure_unicode(docnode.value.value) else: docstr = utils.ensure_unicode(docnode.value.s) sourcelines = self.sourcelines start, stop = self._find_docstr_startpos_workaround(docstr, sourcelines, endpos) # Convert 0-based line positions to 1-based line numbers doclineno = start + 1 doclineno_end = stop # print('docnode = {!r}'.format(docnode)) return doclineno, doclineno_end @classmethod def _find_docstr_endpos_workaround(cls, docstr, sourcelines, startpos): """ Like docstr_line_workaround, but works from the top-down instead of bottom-up. This is for pypy. Given a docstring, its original source lines, and where the start position is, this function finds the end-position of the docstr Example: >>> fmtkw = dict(sss=chr(39) * 3, ddd=chr(34) * 3) >>> source = utils.codeblock( ''' {ddd} docstr0 {ddd} '''.format(**fmtkw)) >>> sourcelines = source.splitlines() >>> docstr = eval(source, {}, {}) >>> startpos = 0 >>> start, stop = TopLevelVisitor._find_docstr_endpos_workaround(docstr, sourcelines, startpos) >>> assert (start, stop) == (0, 2) >>> # >>> source = utils.codeblock( ''' "docstr0" '''.format(**fmtkw)) >>> sourcelines = source.splitlines() >>> docstr = eval(source, {}, {}) >>> startpos = 0 >>> start, stop = TopLevelVisitor._find_docstr_endpos_workaround(docstr, sourcelines, startpos) >>> assert (start, stop) == (0, 0) """ start = startpos stop = startpos startline = sourcelines[start] trips = ("'''", '"""') for trip in trips: if startline.strip().startswith((trip, 'r' + trip)): nlines = docstr.count('\n') # assuming that the docstr is actually terminated with this # kind of triple quote, then the end line is at this position cand_stop_ = start + nlines endline = sourcelines[cand_stop_] endpat = re.escape(trip) + r'\s*#.*$' endline_ = re.sub(endpat, trip, endline).strip() # The startline should also begin with the same triple quote # Account for raw strings. Note f-strings cannot be docstrings if endline_.endswith(trip): stop = cand_stop_ break else: # Conditions failed, revert to assuming a one-line string. stop = start return start, stop def _find_docstr_startpos_workaround(self, docstr, sourcelines, endpos): r""" Find the which sourcelines contain the docstring Args: docstr (str): the extracted docstring. sourcelines (list): a list of all lines in the file. We assume the docstring exists as a pure string literal in the source. In other words, no postprocessing via split, format, or any other dynamic programmatic modification should be made to the docstrings. Python's docstring extractor assumes this as well. endpos (int): line position (starting at 0) the docstring ends on. Note: positions are 0 based but linenos are 1 based. Returns: tuple[Int, Int]: start, stop: start: the line position (0 based) the docstring starts on stop: the line position (0 based) that the docstring stops such that sourcelines[start:stop] will contain the docstring CommandLine: python -m xdoctest xdoctest.static_analysis TopLevelVisitor._find_docstr_startpos_workaround python -m xdoctest xdoctest.static_analysis TopLevelVisitor._find_docstr_startpos_workaround --debug Example: >>> # xdoctest: +REQUIRES(CPython) >>> # This function is a specific workaround for a CPython bug. >>> from xdoctest.static_analysis import * >>> sq = chr(39) # single quote >>> dq = chr(34) # double quote >>> source = utils.codeblock( ''' def func0(): {ddd} docstr0 {ddd} def func1(): {ddd} docstr1 {ddd} def func2(): {ddd} docstr2 {ddd} def func3(): {ddd} docstr3 {ddd} # foobar def func5(): {ddd}pathological case {sss} # {ddd} # {sss} # {ddd} # {ddd} def func6(): " single quoted docstr " def func7(): r{ddd} raw line {ddd} ''').format(ddd=dq * 3, sss=sq * 3) >>> print(utils.add_line_numbers(utils.highlight_code(source), start=0)) >>> targets = [ >>> (1, 2), >>> (3, 5), >>> (6, 8), >>> (9, 12), >>> (13, 15), >>> (16, 17), >>> (18, 21), >>> ] >>> self = TopLevelVisitor.parse(source) >>> pt = ast.parse(source.encode('utf8')) >>> sourcelines = source.splitlines() >>> # PYPY docnode.lineno specify the startpos of a docstring not >>> # the end. >>> print('\n\n====\n\n') >>> #for i in [0, 1]: >>> for i in range(len(targets)): >>> print('----------') >>> funcnode = pt.body[i] >>> print('funcnode = {!r}'.format(funcnode)) >>> docnode = funcnode.body[0] >>> print('funcnode.__dict__ = {!r}'.format(funcnode.__dict__)) >>> print('docnode = {!r}'.format(docnode)) >>> print('docnode.value = {!r}'.format(docnode.value)) >>> print('docnode.value.__dict__ = {!r}'.format(docnode.value.__dict__)) >>> if IS_PY_GE_312: >>> print('docnode.value.value = {!r}'.format(docnode.value.value)) >>> else: >>> print('docnode.value.s = {!r}'.format(docnode.value.s)) >>> print('docnode.lineno = {!r}'.format(docnode.lineno)) >>> print('docnode.col_offset = {!r}'.format(docnode.col_offset)) >>> print('docnode = {!r}'.format(docnode)) >>> #import IPython >>> #IPython.embed() >>> docstr = ast.get_docstring(funcnode, clean=False) >>> print('len(docstr) = {}'.format(len(docstr))) >>> endpos = docnode.lineno - 1 >>> if hasattr(docnode, 'end_lineno'): >>> endpos = docnode.end_lineno - 1 >>> print('endpos = {!r}'.format(endpos)) >>> start, end = self._find_docstr_startpos_workaround(docstr, sourcelines, endpos) >>> print('i = {!r}'.format(i)) >>> print('got = {}, {}'.format(start, end)) >>> print('want = {}, {}'.format(*targets[i])) >>> if targets[i] != (start, end): >>> print('---') >>> print(docstr) >>> print('---') >>> print('sourcelines = [\n{}\n]'.format(', \n'.join(list(map(repr, enumerate(sourcelines)))))) >>> print('endpos = {!r}'.format(endpos)) >>> raise AssertionError('docstr workaround is failing') >>> print('----------') """ # First assume a one-line string that starts and stops on the same line start = endpos stop = endpos + 1 endline = sourcelines[stop - 1] # Determine if the docstring is a triple quoted string, by trying both # triple quote styles and checking if the string starts and ends with # the same style. If both cases are true we know we are in a triple # quoted string literal and can therefore safely extract the starting # line position. trips = ("'''", '"""') for trip in trips: pattern = re.escape(trip) + r'\s*#.*$' # Assuming the multiline string is using `trip` as the triple quote # format, then the first instance of that pattern must terminate # the string literal. Afterwards the only valid characters are # whitespace and comments. Anything after the comment can be # ignored. The above pattern will match the first triple quote it # sees, and then will remove any trailing comments. endline_ = re.sub(pattern, trip, endline).strip() # After removing comments, if the endline endswith a triple quote, # then we must be in a multiline string IF the startline starts # with that same triple quote. We should be able to determine where # the startline is because we know how many newline characters are # in the extracted docstring. This works because all newline # characters in multiline string literals MUST correspond to actual # newlines in the source code. if endline_.endswith(trip): nlines = docstr.count('\n') # assuming that the docstr is actually terminated with this # kind of triple quote, then the start line is at this position cand_start_ = stop - nlines - 1 startline = sourcelines[cand_start_] # The startline should also begin with the same triple quote # Account for raw strings. Note f-strings cannot be docstrings if startline.strip().startswith((trip, 'r' + trip)): # Both conditions pass. start = cand_start_ break else: # Conditions failed, revert to assuming a one-line string. start = stop - 1 return start, stop def _get_docstring(self, node): """ CommandLine: xdoctest -m xdoctest.static_analysis.py TopLevelVisitor._get_docstring Example: >>> source = utils.codeblock( ''' def foo(): 'docstr' ''') >>> self = TopLevelVisitor(source) >>> node = self.syntax_tree().body[0] >>> self._get_docstring(node) ('docstr', 2, 2) """ docstr = ast.get_docstring(node, clean=False) if docstr is not None: docnode = node.body[0] doclineno, doclineno_end = self._docnode_line_workaround(docnode) else: doclineno = None doclineno_end = None return (docstr, doclineno, doclineno_end) def _workaround_func_lineno(self, node): """ Finds the correct line for the original function definition even when decorators are involved. Example: >>> source = utils.codeblock( ''' @bar @baz def foo(): 'docstr' ''') >>> self = TopLevelVisitor(source) >>> node = self.syntax_tree().body[0] >>> self._workaround_func_lineno(node) 3 """ # Try and find the lineno of the function definition # (maybe the fact that its on a decorator is actually right...) if node.decorator_list: # Decorators can throw off the line the function is declared on linex = node.lineno - 1 pattern = r'\s*def\s*' + node.name # I think this is actually robust while not re.match(pattern, self.sourcelines[linex]): linex += 1 lineno = linex + 1 else: lineno = node.lineno return lineno def parse_static_calldefs(source=None, fpath=None): """ Statically finds top-level callable functions and methods in python source Args: source (str): python text fpath (str): filepath to read if source is not specified Returns: Dict[str, CallDefNode]: mapping from callnames to CallDefNodes, which contain info about the item with the doctest. Example: >>> from xdoctest import static_analysis >>> fpath = static_analysis.__file__.replace('.pyc', '.py') >>> calldefs = parse_static_calldefs(fpath=fpath) >>> assert 'parse_static_calldefs' in calldefs """ if source is None: # pragma: no branch try: with open(fpath, 'rb') as file_: source = file_.read().decode('utf-8') except Exception: try: with open(fpath, 'rb') as file_: source = file_.read() except Exception: print('Unable to read fpath = {!r}'.format(fpath)) raise try: self = TopLevelVisitor.parse(source) return self.calldefs except Exception: # nocover if fpath: print('Failed to parse docstring for fpath=%r' % (fpath,)) else: print('Failed to parse docstring') raise def parse_calldefs(source=None, fpath=None): from xdoctest.utils import util_deprecation util_deprecation.schedule_deprecation( modname='xdoctest', name='parse_calldefs', type='function', migration='use parse_static_calldefs instead', deprecate='1.0.0', error='1.1.0', remove='1.2.0' ) return parse_static_calldefs(source=source, fpath=fpath) def _parse_static_node_value(node): """ Extract a constant value from a node if possible """ import numbers if (isinstance(node, ast.Constant) and isinstance(node.value, numbers.Number)): value = node.value elif (isinstance(node, ast.Constant) and isinstance(node.value, str)): value = node.value elif isinstance(node, ast.List): value = list(map(_parse_static_node_value, node.elts)) elif isinstance(node, ast.Tuple): value = tuple(map(_parse_static_node_value, node.elts)) elif isinstance(node, (ast.Dict)): keys = map(_parse_static_node_value, node.keys) values = map(_parse_static_node_value, node.values) value = OrderedDict(zip(keys, values)) # value = dict(zip(keys, values)) elif IS_PY_LT_314 and isinstance(node, (ast.NameConstant)): value = node.value elif isinstance(node, ast.Constant): value = node.value else: print(node.__dict__) raise TypeError('Cannot parse a static value from non-static node ' 'of type: {!r}'.format(type(node))) return value def parse_static_value(key, source=None, fpath=None): """ Statically parse a constant variable's value from python code. TODO: This does not belong here. Move this to an external static analysis library. Args: key (str): name of the variable source (str): python text fpath (str): filepath to read if source is not specified Returns: object Example: >>> from xdoctest.static_analysis import parse_static_value >>> key = 'foo' >>> source = 'foo = 123' >>> assert parse_static_value(key, source=source) == 123 >>> source = 'foo = "123"' >>> assert parse_static_value(key, source=source) == '123' >>> source = 'foo = [1, 2, 3]' >>> assert parse_static_value(key, source=source) == [1, 2, 3] >>> source = 'foo = (1, 2, "3")' >>> assert parse_static_value(key, source=source) == (1, 2, "3") >>> source = 'foo = {1: 2, 3: 4}' >>> assert parse_static_value(key, source=source) == {1: 2, 3: 4} >>> source = 'foo = None' >>> assert parse_static_value(key, source=source) == None >>> #parse_static_value('bar', source=source) >>> #parse_static_value('bar', source='foo=1; bar = [1, foo]') """ if source is None: # pragma: no branch try: with open(fpath, 'rb') as file_: source = file_.read().decode('utf-8') except Exception: with open(fpath, 'rb') as file_: source = file_.read() pt = ast.parse(source) class AssignentVisitor(ast.NodeVisitor): def visit_Assign(self, node): for target in node.targets: target_id = getattr(target, 'id', None) if target_id == key: self.value = _parse_static_node_value(node.value) sentinel = object() visitor = AssignentVisitor() visitor.value = sentinel visitor.visit(pt) if visitor.value is sentinel: raise NameError('No static variable named {!r}'.format(key)) return visitor.value def package_modpaths(pkgpath, with_pkg=False, with_mod=True, followlinks=True, recursive=True, with_libs=False, check=True): r""" Finds sub-packages and sub-modules belonging to a package. Args: pkgpath (str): path to a module or package with_pkg (bool): if True includes package __init__ files (default = False) with_mod (bool): if True includes module files (default = True) exclude (list): ignores any module that matches any of these patterns recursive (bool): if False, then only child modules are included with_libs (bool): if True then compiled shared libs will be returned as well check (bool): if False, then then pkgpath is considered a module even if it does not contain an __init__ file. Yields: str: module names belonging to the package References: http://stackoverflow.com/questions/1707709/list-modules-in-py-package Example: >>> from xdoctest.static_analysis import * >>> pkgpath = modname_to_modpath('xdoctest') >>> paths = list(package_modpaths(pkgpath)) >>> print('\n'.join(paths)) >>> names = list(map(modpath_to_modname, paths)) >>> assert 'xdoctest.core' in names >>> assert 'xdoctest.__main__' in names >>> assert 'xdoctest' not in names >>> print('\n'.join(names)) """ if isfile(pkgpath): # If input is a file, just return it yield pkgpath else: if with_pkg: root_path = join(pkgpath, '__init__.py') if not check or exists(root_path): yield root_path valid_exts = ['.py'] if with_libs: valid_exts += utils.util_import._platform_pylib_exts() for dpath, dnames, fnames in os.walk(pkgpath, followlinks=followlinks): ispkg = exists(join(dpath, '__init__.py')) if ispkg or not check: check = True # always check subdirs if with_mod: for fname in fnames: if splitext(fname)[1] in valid_exts: # dont yield inits. Handled in pkg loop. if fname != '__init__.py': path = join(dpath, fname) yield path if with_pkg: for dname in dnames: path = join(dpath, dname, '__init__.py') if exists(path): yield path else: # Stop recursing when we are out of the package del dnames[:] if not recursive: break def is_balanced_statement(lines, only_tokens=False, reraise=0): r""" Checks if the lines have balanced braces and quotes. Args: lines (List[str]): list of strings, one for each line Returns: bool: True if the statement is balanced, otherwise False CommandLine: xdoctest -m xdoctest.static_analysis is_balanced_statement:0 References: https://stackoverflow.com/questions/46061949/parse-until-complete Example: >>> from xdoctest.static_analysis import * # NOQA >>> assert is_balanced_statement(['print(foobar)']) >>> assert is_balanced_statement(['foo = bar']) is True >>> assert is_balanced_statement(['foo = (']) is False >>> assert is_balanced_statement(['foo = (', "')(')"]) is True >>> assert is_balanced_statement( ... ['foo = (', "'''", ")]'''", ')']) is True >>> assert is_balanced_statement( ... ['foo = ', "'''", ")]'''", ')']) is False >>> #assert is_balanced_statement(['foo = ']) is False >>> #assert is_balanced_statement(['== ']) is False >>> lines = ['def foo():', '', ' x = 1', 'assert True', ''] >>> assert is_balanced_statement(lines) Example: >>> from xdoctest.static_analysis import * >>> source_parts = [ >>> 'setup(', >>> " name='extension',", >>> ' ext_modules=[', >>> ' CppExtension(', >>> " name='extension',", >>> " sources=['extension.cpp'],", >>> " extra_compile_args=['-g'])),", >>> ' ],', >>> ] >>> print('\n'.join(source_parts)) >>> assert not is_balanced_statement(source_parts) >>> source_parts = [ >>> 'setup(', >>> " name='extension',", >>> ' ext_modules=[', >>> ' CppExtension(', >>> " name='extension',", >>> " sources=['extension.cpp'],", >>> " extra_compile_args=['-g']),", >>> ' ],', >>> ' cmdclass={', >>> " 'build_ext': BuildExtension", >>> ' })', >>> ] >>> print('\n'.join(source_parts)) >>> assert is_balanced_statement(source_parts) Example: >>> lines = ['try: raise Exception'] >>> is_balanced_statement(lines, only_tokens=1) True >>> is_balanced_statement(lines, only_tokens=0) False Example: >>> # Cause a failure case on 3.12 >>> from xdoctest.static_analysis import * >>> lines = ['3, 4]', 'print(len(x))'] >>> is_balanced_statement(lines, only_tokens=1) False """ # Only iterate through non-empty lines otherwise tokenize will stop short lines = list(lines) iterable = (line for line in lines if line) def _readline(): return next(iterable) try: for t in tokenize.generate_tokens(_readline): pass except tokenize.TokenError as ex: message = ex.args[0] # First case is Python <= 3.11, Second case is >= 3.12 if message.startswith(('EOF in multi-line', 'unexpected EOF in multi-line')): if reraise: raise return False raise except IndentationError as ex: message = ex.args[0] if message.startswith('unindent does not match any outer indentation'): if reraise: raise return False raise else: # Note: trying to use ast.parse(block) will not work # here because it breaks in try, except, else if not only_tokens: # The above test wont trigger for cases involving higher level # python grammar. If we wish to test for these we will have to use # an AST. try: text = '\n'.join(lines) # from textwrap import dedent # text = dedent(text) six_axt_parse(text) except SyntaxError: if reraise: raise return False return True def extract_comments(source): """ Returns the text in each comment in a block of python code. Uses tokenize to account for quotations. Args: source (str | List[str]): CommandLine: python -m xdoctest.static_analysis extract_comments Example: >>> from xdoctest import utils >>> source = utils.codeblock( >>> ''' # comment 1 a = '# not a comment' # comment 2 c = 3 ''') >>> comments = list(extract_comments(source)) >>> assert comments == ['# comment 1', '# comment 2'] >>> comments = list(extract_comments(source.splitlines())) >>> assert comments == ['# comment 1', '# comment 2'] """ if isinstance(source, str): lines = source.splitlines() else: lines = source # Only iterate through non-empty lines otherwise tokenize will stop short iterable = (line for line in lines if line) def _readline(): return next(iterable) try: for t in tokenize.generate_tokens(_readline): if t[0] == tokenize.COMMENT: yield t[1] except tokenize.TokenError: pass def _strip_hashtag_comments_and_newlines(source): """ Removes hashtag comments from underlying source Args: source (str | List[str]): CommandLine: xdoctest -m xdoctest.static_analysis _strip_hashtag_comments_and_newlines TODO: would be better if this was some sort of configurable minify API Example: >>> from xdoctest.static_analysis import _strip_hashtag_comments_and_newlines >>> from xdoctest import utils >>> fmtkw = dict(sss=chr(39) * 3, ddd=chr(34) * 3) >>> source = utils.codeblock( >>> ''' # comment 1 a = '# not a comment' # comment 2 multiline_string = {ddd} one {ddd} b = [ 1, # foo # bar 3, ] c = 3 ''').format(**fmtkw) >>> non_comments = _strip_hashtag_comments_and_newlines(source) >>> print(non_comments) >>> assert non_comments.count(chr(10)) == 10 >>> assert non_comments.count('#') == 1 """ if isinstance(source, str): import io f = io.StringIO(source) readline = f.readline else: readline = iter(source).__next__ def strip_hashtag_comments(tokens): """ Drop comment tokens from a `tokenize` stream. """ return (t for t in tokens if t[0] != tokenize.COMMENT) def strip_consecutive_newlines(tokens): """ Consecutive newlines are dropped and trailing whitespace Adapted from: https://github.com/mitogen-hq/mitogen/blob/master/mitogen/minify.py#L65 """ prev_typ = None prev_end_col = 0 skipped_rows = 0 for token_info in tokens: typ, tok, (start_row, start_col), (end_row, end_col), line = token_info if typ in (tokenize.NL, tokenize.NEWLINE): if prev_typ in (tokenize.NL, tokenize.NEWLINE, None): skipped_rows += 1 continue else: start_col = prev_end_col end_col = start_col + 1 prev_typ = typ prev_end_col = end_col yield typ, tok, (start_row - skipped_rows, start_col), (end_row - skipped_rows, end_col), line tokens = tokenize.generate_tokens(readline) tokens = strip_hashtag_comments(tokens) tokens = strip_consecutive_newlines(tokens) new_source = tokenize.untokenize(tokens) return new_source def six_axt_parse(source_block, filename='', compatible=True): """ Python 2/3 compatible replacement for ast.parse(source_block, filename='') Args: source (str): filename (str): compatible (bool): Returns: ast.Module | types.CodeType """ pt = ast.parse(source_block, filename=filename) return pt if __name__ == '__main__': import xdoctest as xdoc xdoc.doctest_module() Erotemic-xdoctest-fac8308/src/xdoctest/static_analysis.pyi000066400000000000000000000054521505122333300240760ustar00rootroot00000000000000import ast from typing import Dict from collections import OrderedDict from typing import List import types from _typeshed import Incomplete from collections.abc import Generator from typing import Any from xdoctest.utils.util_import import is_modname_importable as is_modname_importable, modname_to_modpath as modname_to_modpath, modpath_to_modname as modpath_to_modname, split_modpath as split_modpath PLAT_IMPL: Incomplete HAS_UPDATED_LINENOS: Incomplete class CallDefNode: lineno_end: None | int callname: str lineno: int docstr: str doclineno: int doclineno_end: int args: None | ast.arguments def __init__(self, callname: str, lineno: int, docstr: str, doclineno: int, doclineno_end: int, args: None | ast.arguments = None) -> None: ... class TopLevelVisitor(ast.NodeVisitor): calldefs: OrderedDict source: None | str sourcelines: None | List[str] assignments: list @classmethod def parse(cls, source: str): ... def __init__(self, source: None | str = None) -> None: ... def syntax_tree(self) -> ast.Module: ... def process_finished(self, node: ast.AST) -> None: ... def visit(self, node: ast.AST) -> None: ... def visit_FunctionDef(self, node: ast.FunctionDef) -> None: ... def visit_ClassDef(self, node: ast.ClassDef) -> None: ... def visit_Module(self, node: ast.Module) -> None: ... def visit_Assign(self, node: ast.Assign) -> None: ... def visit_If(self, node: ast.If) -> None: ... def parse_static_calldefs(source: str | None = None, fpath: str | None = None) -> Dict[str, CallDefNode]: ... def parse_calldefs(source: Incomplete | None = ..., fpath: Incomplete | None = ...): ... def parse_static_value(key: str, source: str | None = None, fpath: str | None = None) -> object: ... def package_modpaths(pkgpath: str, with_pkg: bool = False, with_mod: bool = True, followlinks: bool = ..., recursive: bool = True, with_libs: bool = False, check: bool = True) -> Generator[str, None, None]: ... def is_balanced_statement(lines: List[str], only_tokens: bool = ..., reraise: int = ...) -> bool: ... def extract_comments(source: str | List[str]) -> Generator[Any, None, Any]: ... def six_axt_parse(source_block, filename: str = '', compatible: bool = True) -> ast.Module | types.CodeType: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/000077500000000000000000000000001505122333300213135ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/src/xdoctest/utils/__init__.py000066400000000000000000000036721505122333300234340ustar00rootroot00000000000000""" Most of these utilities exist in ubelt, but we copy them here to keep xdoctest as a package with minimal dependencies, whereas ubelt includes a larger set of utilities. This __init__ file is generated using mkinit: mkinit xdoctest.utils """ from xdoctest.utils import util_asyncio from xdoctest.utils import util_import from xdoctest.utils import util_misc from xdoctest.utils import util_mixins from xdoctest.utils import util_path from xdoctest.utils import util_str from xdoctest.utils import util_stream from xdoctest.utils.util_import import (PythonPathContext, import_module_from_name, import_module_from_path, is_modname_importable, modname_to_modpath, modpath_to_modname, normalize_modpath, split_modpath,) from xdoctest.utils.util_misc import (TempDoctest,) from xdoctest.utils.util_mixins import (NiceRepr,) from xdoctest.utils.util_path import (TempDir, ensuredir,) from xdoctest.utils.util_str import (add_line_numbers, codeblock, color_text, ensure_unicode, highlight_code, indent, strip_ansi,) from xdoctest.utils.util_stream import (CaptureStdout, CaptureStream, TeeStringIO,) __all__ = ['CaptureStdout', 'CaptureStream', 'NiceRepr', 'PythonPathContext', 'TeeStringIO', 'TempDir', 'TempDoctest', 'add_line_numbers', 'codeblock', 'color_text', 'ensure_unicode', 'ensuredir', 'highlight_code', 'import_module_from_name', 'import_module_from_path', 'indent', 'is_modname_importable', 'modname_to_modpath', 'modpath_to_modname', 'normalize_modpath', 'split_modpath', 'strip_ansi', 'util_import', 'util_misc', 'util_mixins', 'util_path', 'util_str', 'util_stream'] Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_asyncio.py000066400000000000000000000113471505122333300243750ustar00rootroot00000000000000""" Utilities related to async support """ import asyncio import sys # see asyncio.runners.Runner class FallbackRunner: """A fallback implementation of :class:`asyncio.Runner` for Python<3.11.""" def __init__(self): self._loop = None def run(self, coro): """ Run code in the embedded event loop. Example: >>> import asyncio >>> async def test(): >>> return asyncio.sleep(0, result='slept') >>> runner = FallbackRunner() >>> try: >>> task = runner.run(test()) >>> result = runner.run(task) >>> finally: >>> runner.close() >>> result 'slept' Example: >>> import asyncio >>> runner = FallbackRunner() >>> try: >>> runner.run(asyncio.sleep(0)) # xdoctest: +ASYNC >>> finally: >>> runner.close() Traceback (most recent call last): RuntimeError: Runner.run() cannot be called from a running event loop """ if running(): msg = 'Runner.run() cannot be called from a running event loop' raise RuntimeError(msg) if self._loop is None: self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) return self._loop.run_until_complete(coro) def close(self): """ Shutdown and close event loop. Example: >>> import asyncio >>> runner = FallbackRunner() >>> try: >>> runner.run(asyncio.sleep(0)) >>> finally: >>> runner.close() >>> runner.close() # must be no-op Example: >>> import asyncio >>> async def main(): >>> global task # avoid disappearing >>> task = asyncio.create_task(asyncio.sleep(3600)) >>> await asyncio.sleep(0) # start the task >>> runner = FallbackRunner() >>> try: >>> runner.run(main()) >>> finally: >>> runner.close() >>> task.cancelled() True Example: >>> import asyncio >>> def handler(loop, context): >>> global exc_context >>> exc_context = context >>> async def test(): >>> asyncio.get_running_loop().set_exception_handler(handler) >>> try: >>> await asyncio.sleep(3600) >>> except asyncio.CancelledError: >>> 1 // 0 # raise ZeroDivisionError >>> async def main(): >>> global task # avoid disappearing >>> task = asyncio.create_task(test()) >>> await asyncio.sleep(0) # start the task >>> runner = FallbackRunner() >>> try: >>> runner.run(main()) >>> finally: >>> runner.close() >>> exc_context['message'] 'unhandled exception during asyncio.run() shutdown' >>> isinstance(exc_context['exception'], ZeroDivisionError) True >>> exc_context['task'] is task True """ loop = self._loop if loop is None: return try: _cancel_all_tasks(loop) loop.run_until_complete(loop.shutdown_asyncgens()) if sys.version_info >= (3, 9): loop.run_until_complete(loop.shutdown_default_executor()) finally: asyncio.set_event_loop(None) loop.close() self._loop = None # see asyncio.runners._cancel_all_tasks def _cancel_all_tasks(loop): to_cancel = asyncio.all_tasks(loop) if not to_cancel: return for task in to_cancel: task.cancel() loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True)) for task in to_cancel: if task.cancelled(): continue if task.exception() is not None: loop.call_exception_handler({ 'message': 'unhandled exception during asyncio.run() shutdown', 'exception': task.exception(), 'task': task, }) def running(): """ Return :data:`True` if there is a running event loop. Example: >>> running() # xdoctest: +ASYNC True >>> running() # xdoctest: -ASYNC False """ try: loop = asyncio.get_running_loop() except RuntimeError: # no running event loop loop = None return loop is not None if sys.version_info >= (3, 11): from asyncio import Runner else: Runner = FallbackRunner Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_asyncio.pyi000066400000000000000000000007231505122333300245420ustar00rootroot00000000000000import sys from typing import Any, TypeVar, final if sys.version_info >= (3, 9): from collections.abc import Coroutine else: from typing import Coroutine _T = TypeVar('_T') @final class FallbackRunner: def __init__(self) -> None: ... def run(self, coro: Coroutine[Any, Any, _T]) -> _T: ... def close(self) -> None: ... def running() -> bool: ... if sys.version_info >= (3, 11): from asyncio import Runner else: Runner = FallbackRunner Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_deprecation.py000066400000000000000000000123461505122333300252250ustar00rootroot00000000000000""" Utilities for helping robustly deprecate features. """ def schedule_deprecation(modname, name='?', type='?', migration='', deprecate=None, error=None, remove=None): """ Deprecation machinery to help provide users with a smoother transition. This function provides a concise way to mark a feature as deprecated by providing a description of the deprecated feature, documentation on how to migrate away from the deprecated feature, and the versions that the feature is scheduled for deprecation and eventual removal. Based on the version of the library and the specified schedule this function will either do nothing, emit a warning, or raise an error with helpful messages for both users and developers. Args: modname (str): The name of the underlying module associated with the feature to be deprecated. The module must already be imported and have a passable ``__version__`` attribute. name (str): The name of the feature to deprecate. This is usually a function or argument name. type (str): A description of what the feature is. This is not a formal type, but rather a prose description: e.g. "argument to my_func". migration (str): A description that lets users know what they should do instead of using the deprecated feature. deprecate (str | None): The version when the feature is officially deprecated and this function should start to emit a deprecation warning. error (str | None): The version when the feature is officially no longer supported, and will start to raise a RuntimeError. remove (str | None): The version when the feature is completely removed. An AssertionError will be raised if this function is still present reminding the developer to remove the feature (or extend the remove version). Note: The :class:`DeprecationWarning` is not visible by default. https://docs.python.org/3/library/warnings.html Example: >>> import sys >>> import types >>> import pytest >>> dummy_module = sys.modules['dummy_module'] = types.ModuleType('dummy_module') >>> # When less than the deprecated version this does nothing >>> dummy_module.__version__ = '1.0.0' >>> schedule_deprecation( ... 'dummy_module', 'myfunc', 'function', 'do something else', ... deprecate='1.1.0', error='1.2.0', remove='1.3.0') >>> # Now this raises warning >>> with pytest.warns(DeprecationWarning): ... dummy_module.__version__ = '1.1.0' ... schedule_deprecation( ... 'dummy_module', 'myfunc', 'function', 'do something else', ... deprecate='1.1.0', error='1.2.0', remove='1.3.0') >>> # Now this raises an error for the user >>> with pytest.raises(RuntimeError): ... dummy_module.__version__ = '1.2.0' ... schedule_deprecation( ... 'dummy_module', 'myfunc', 'function', 'do something else', ... deprecate='1.1.0', error='1.2.0', remove='1.3.0') >>> # Now this raises an error for the developer >>> with pytest.raises(AssertionError): ... dummy_module.__version__ = '1.3.0' ... schedule_deprecation( ... 'dummy_module', 'myfunc', 'function', 'do something else', ... deprecate='1.1.0', error='1.2.0', remove='1.3.0') >>> # When no versions are specified, it simply emits the warning >>> with pytest.warns(DeprecationWarning): ... dummy_module.__version__ = '1.1.0' ... schedule_deprecation( ... 'dummy_module', 'myfunc', 'function', 'do something else') """ import sys import warnings try: from packaging.version import parse as parse_version except ImportError: from distutils.version import LooseVersion as parse_version module = sys.modules[modname] current = parse_version(module.__version__) deprecate_str = '' if deprecate is not None: deprecate = parse_version(deprecate) deprecate_str = ' in {}'.format(deprecate) remove_str = '' if remove is not None: remove = parse_version(remove) remove_str = ' in {}'.format(remove) error_str = '' if error is not None: error = parse_version(error) error_str = ' in {}'.format(error) if deprecate is None or current >= deprecate: msg = ( 'The "{name}" {type} was deprecated{deprecate_str}, will cause ' 'an error{error_str} and will be removed{remove_str}. The current ' 'version is {current}. {migration}' ).format(**locals()).strip() if remove is not None and current >= remove: raise AssertionError( 'Forgot to remove deprecated: ' + msg + ' ' + 'Remove the function, or extend the scheduled remove version.' ) if error is not None and current >= error: raise RuntimeError(msg) else: warnings.warn(msg, DeprecationWarning) Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_deprecation.pyi000066400000000000000000000005301505122333300253660ustar00rootroot00000000000000def schedule_deprecation(modname: str, name: str = '?', type: str = '?', migration: str = '', deprecate: str | None = None, error: str | None = None, remove: str | None = None) -> None: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_import.py000066400000000000000000001143411505122333300242400ustar00rootroot00000000000000""" This file was autogenerated based on code in ubelt via dev/port_ubelt_utils.py in the xdoctest repo """ from os.path import abspath from os.path import exists from os.path import expanduser from os.path import isdir from os.path import join import os from os.path import split from os.path import dirname from os.path import relpath from os.path import splitext from os.path import basename from os.path import isfile from os.path import realpath import sys import warnings def is_modname_importable(modname, sys_path=None, exclude=None): """ Determines if a modname is importable based on your current sys.path Args: modname (str): name of module to check sys_path (list | None, default=None): if specified overrides ``sys.path`` exclude (list | None): list of directory paths. if specified prevents these directories from being searched. Returns: bool: True if the module can be imported Example: >>> is_modname_importable('xdoctest') True >>> is_modname_importable('not_a_real_module') False >>> is_modname_importable('xdoctest', sys_path=[]) False """ modpath = _syspath_modname_to_modpath(modname, sys_path=sys_path, exclude=exclude) flag = bool(modpath is not None) return flag def _importlib_import_modpath(modpath): # nocover """ Alternative to import_module_from_path using importlib mechainsms Args: modname (str): the module name. """ dpath, rel_modpath = split_modpath(modpath) modname = modpath_to_modname(modpath) import importlib.util spec = importlib.util.spec_from_file_location(modname, modpath) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module def _pkgutil_modname_to_modpath(modname): # nocover """ faster version of :func:`_syspath_modname_to_modpath` using builtin python mechanisms, but unfortunately it doesn't play nice with pytest. Note: pkgutil.find_loader is deprecated in 3.12 and removed in 3.14 Args: modname (str): the module name. Example: >>> # xdoctest: +SKIP >>> modname = 'xdoctest.static_analysis' >>> _pkgutil_modname_to_modpath(modname) ...static_analysis.py >>> # xdoctest: +REQUIRES(CPython) >>> _pkgutil_modname_to_modpath('_ctypes') ..._ctypes... Ignore: >>> _pkgutil_modname_to_modpath('cv2') """ import pkgutil loader = pkgutil.find_loader(modname) if loader is None: raise Exception('No module named {} in the PYTHONPATH'.format(modname)) modpath = loader.get_filename().replace('.pyc', '.py') return modpath class PythonPathContext: """ Context for temporarily adding a dir to the PYTHONPATH. Used in testing, and used as a helper in certain ubelt functions. Warning: Even though this context manager takes precautions, this modifies ``sys.path``, and things can go wrong when that happens. This is generally safe as long as nothing else you do inside of this context modifies the path. If the path is modified in this context, we will try to detect it and warn. Attributes: dpath (str | PathLike): directory to insert into the PYTHONPATH index (int): position to add to. Typically either -1 or 0. Example: >>> import sys >>> with PythonPathContext('foo', -1): >>> assert sys.path[-1] == 'foo' >>> assert sys.path[-1] != 'foo' >>> with PythonPathContext('bar', 0): >>> assert sys.path[0] == 'bar' >>> assert sys.path[0] != 'bar' Example: >>> # xdoctest: +REQUIRES(module:pytest) >>> # Mangle the path inside the context >>> import sys >>> self = PythonPathContext('foo', 0) >>> self.__enter__() >>> sys.path.insert(0, 'mangled') >>> import pytest >>> with pytest.warns(UserWarning): >>> self.__exit__(None, None, None) Example: >>> # xdoctest: +REQUIRES(module:pytest) >>> import sys >>> self = PythonPathContext('foo', 0) >>> self.__enter__() >>> sys.path.remove('foo') >>> import pytest >>> with pytest.raises(RuntimeError): >>> self.__exit__(None, None, None) """ def __init__(self, dpath, index=0): """ Args: dpath (str | PathLike): directory to insert into the PYTHONPATH index (int): position to add to. Typically either -1 or 0. """ self.dpath = os.fspath(dpath) self.index = index def __enter__(self): if self.index < 0: self.index = len(sys.path) + self.index + 1 sys.path.insert(self.index, self.dpath) def __exit__(self, ex_type, ex_value, ex_traceback): """ Args: ex_type (Type[BaseException] | None): ex_value (BaseException | None): ex_traceback (TracebackType | None): Returns: bool | None """ need_recover = False if len(sys.path) <= self.index: # nocover msg_parts = [ 'sys.path changed while in PythonPathContext.', 'len(sys.path) = {!r} but index is {!r}'.format( len(sys.path), self.index), ] need_recover = True if sys.path[self.index] != self.dpath: # nocover # The path is not where we put it, the path must have been mangled msg_parts = [ 'sys.path changed while in PythonPathContext', 'Expected dpath={!r} at index={!r} in sys.path, but got ' 'dpath={!r}'.format( self.dpath, self.index, sys.path[self.index] ) ] need_recover = True if need_recover: # Try and find where the temporary path went try: real_index = sys.path.index(self.dpath) except ValueError: msg_parts.append('Expected dpath was not in sys.path') raise RuntimeError('\n'.join(msg_parts)) else: # We were able to recover, but warn the user. This method of # recovery is a heuristic and does not work in some cases. msg_parts.append(( 'Expected dpath was at index {}. ' 'This could indicate conflicting module namespaces.' ).format(real_index)) warnings.warn('\n'.join(msg_parts)) sys.path.pop(real_index) else: sys.path.pop(self.index) def _custom_import_modpath(modpath, index=-1): dpath, rel_modpath = split_modpath(modpath) modname = modpath_to_modname(modpath) try: with PythonPathContext(dpath, index=index): module = import_module_from_name(modname) except Exception as ex: # nocover msg_parts = [( 'ERROR: Failed to import modname={} with modpath={} and ' 'sys.path modified with {} at index={}').format( modname, modpath, repr(dpath), index) ] msg_parts.append('Caused by: {}'.format(repr(ex))) raise RuntimeError('\n'.join(msg_parts)) return module def import_module_from_path(modpath, index=-1): """ Imports a module via a filesystem path. This works by modifying ``sys.path``, importing the module name, and then attempting to undo the change to sys.path. This function may produce unexpected results in the case where the imported module itself itself modifies ``sys.path`` or if there is another conflicting module with the same name. Args: modpath (str | PathLike): Path to the module on disk or within a zipfile. Paths within a zipfile can be given by ``.zip/.py``. index (int): Location at which we modify PYTHONPATH if necessary. If your module name does not conflict, the safest value is -1, However, if there is a conflict, then use an index of 0. The default may change to 0 in the future. Returns: ModuleType: the imported module References: .. [SO_67631] https://stackoverflow.com/questions/67631/import-module-given-path Raises: IOError - when the path to the module does not exist ImportError - when the module is unable to be imported Note: If the module is part of a package, the package will be imported first. These modules may cause problems when reloading via IPython magic This can import a module from within a zipfile. To do this modpath should specify the path to the zipfile and the path to the module within that zipfile separated by a colon or pathsep. E.g. "/path/to/archive.zip:mymodule.pl" Warning: It is best to use this with paths that will not conflict with previously existing modules. If the modpath conflicts with a previously existing module name. And the target module does imports of its own relative to this conflicting path. In this case, the module that was loaded first will win. For example if you try to import '/foo/bar/pkg/mod.py' from the folder structure: .. code:: - foo/ +- bar/ +- pkg/ + __init__.py |- mod.py |- helper.py If there exists another module named ``pkg`` already in ``sys.modules`` and mod.py contains the code ``from . import helper``, Python will assume helper belongs to the ``pkg`` module already in ``sys.modules``. This can cause a NameError or worse --- an incorrect helper module. SeeAlso: :func:`import_module_from_name` Example: >>> # xdoctest: +SKIP("ubelt dependency") >>> import xdoctest >>> modpath = xdoctest.__file__ >>> module = ub.import_module_from_path(modpath) >>> assert module is xdoctest Example: >>> # Test importing a module from within a zipfile >>> # xdoctest: +SKIP("ubelt dependency") >>> import zipfile >>> from xdoctest import utils >>> import os >>> from os.path import join, expanduser, normpath >>> dpath = expanduser('~/.cache/xdoctest') >>> dpath = utils.ensuredir(dpath) >>> #dpath = utils.TempDir().ensure() >>> # Write to an external module named bar >>> external_modpath = join(dpath, 'bar.py') >>> # For pypy support we have to write this using with >>> with open(external_modpath, 'w') as file: >>> file.write('testvar = 1') >>> internal = 'folder/bar.py' >>> # Move the external bar module into a zipfile >>> zippath = join(dpath, 'myzip.zip') >>> with zipfile.ZipFile(zippath, 'w') as myzip: >>> myzip.write(external_modpath, internal) >>> # Import the bar module from within the zipfile >>> modpath = zippath + ':' + internal >>> modpath = zippath + os.path.sep + internal >>> module = ub.import_module_from_path(modpath) >>> assert normpath(module.__name__) == normpath('folder/bar') >>> assert module.testvar == 1 Example: >>> import pytest >>> # xdoctest: +SKIP("ubelt dependency") >>> with pytest.raises(IOError): >>> ub.import_module_from_path('does-not-exist') >>> with pytest.raises(IOError): >>> ub.import_module_from_path('does-not-exist.zip/') """ modpath = os.fspath(modpath) if not os.path.exists(modpath): import re import zipimport # We allow (if not prefer or force) the colon to be a path.sep in order # to agree with the mod.__name__ attribute that will be produced # TODO: we could codify this by using `util_zip.split_archive` # zip followed by colon or slash pat = '(.zip[' + re.escape(os.path.sep) + '/:])' parts = re.split(pat, modpath, flags=re.IGNORECASE) if len(parts) > 2: archivepath = ''.join(parts[:-1])[:-1] internal = parts[-1] modname = os.path.splitext(internal)[0] modname = os.path.normpath(modname) if os.path.exists(archivepath): zimp_file = zipimport.zipimporter(archivepath) try: try: module = zimp_file.load_module(modname) except Exception: # nocover module = zimp_file.load_module(modname.replace('\\', '/')) # hack except Exception as ex: # nocover text = ( 'Encountered error in import_module_from_path ' 'while calling load_module: ' 'modpath={modpath!r}, ' 'internal={internal!r}, ' 'modname={modname!r}, ' 'archivepath={archivepath!r}, ' 'ex={ex!r}' ).format( modpath=modpath, internal=internal, modname=modname, archivepath=archivepath, ex=ex) print(text) # raise raise Exception(text) return module raise IOError('modpath={} does not exist'.format(modpath)) else: # the importlib version does not work in pytest module = _custom_import_modpath(modpath, index=index) # TODO: use this implementation once pytest fixes importlib # module = _importlib_import_modpath(modpath) return module def import_module_from_name(modname): """ Imports a module from its string name (i.e. ``__name__``) This is a simple wrapper around :func:`importlib.import_module`, but is provided as a companion function to :func:`import_module_from_path`, which contains functionality not provided in the Python standard library. Args: modname (str): module name Returns: ModuleType: module SeeAlso: :func:`import_module_from_path` Example: >>> # test with modules that won't be imported in normal circumstances >>> # todo write a test where we guarantee this >>> # xdoctest: +SKIP("ubelt dependency") >>> import sys >>> modname_list = [ >>> 'pickletools', >>> 'lib2to3.fixes.fix_apply', >>> ] >>> #assert not any(m in sys.modules for m in modname_list) >>> modules = [ub.import_module_from_name(modname) for modname in modname_list] >>> assert [m.__name__ for m in modules] == modname_list >>> assert all(m in sys.modules for m in modname_list) """ if True: # See if this fixes the Docker issue we saw but were unable to # reproduce on another environment. Either way its better to use the # standard importlib implementation than the one I wrote a long time # ago. import importlib module = importlib.import_module(modname) else: # nocover # The __import__ statement is weird if '.' in modname: fromlist = modname.split('.')[-1] fromlist_ = list(map(str, fromlist)) # needs to be ascii for python2.7 module = __import__(modname, {}, {}, fromlist_, 0) else: module = __import__(modname, {}, {}, [], 0) return module def _parse_static_node_value(node): """ Extract a constant value from a node if possible Args: node (ast.AST): input node Returns: Any: parsed value """ import ast from collections import OrderedDict import numbers if isinstance(node, ast.Constant) and isinstance(node.value, numbers.Number): value = node.value elif isinstance(node, ast.Constant) and isinstance(node.value, str): value = node.value elif isinstance(node, ast.List): value = list(map(_parse_static_node_value, node.elts)) elif isinstance(node, ast.Tuple): value = tuple(map(_parse_static_node_value, node.elts)) elif isinstance(node, (ast.Dict)): keys = map(_parse_static_node_value, node.keys) values = map(_parse_static_node_value, node.values) value = OrderedDict(zip(keys, values)) elif isinstance(node, ast.Constant): value = node.value else: raise TypeError('Cannot parse a static value from non-static node ' 'of type: {!r}'.format(type(node))) return value def _extension_module_tags(): """ Returns valid tags an extension module might have Returns: List[str] """ import sysconfig tags = [] # handle PEP 3149 -- ABI version tagged .so files # ABI = application binary interface tags.append(sysconfig.get_config_var('SOABI')) tags.append('abi3') # not sure why this one is valid but it is tags = [t for t in tags if t] return tags def _static_parse(varname, fpath): """ Statically parse the a constant variable from a python file Args: varname (str): variable name to extract fpath (str | PathLike): path to python file to parse Returns: Any: the static value Example: >>> # xdoctest: +SKIP("ubelt dependency") >>> dpath = ub.Path.appdir('tests/import/staticparse').ensuredir() >>> fpath = (dpath / 'foo.py') >>> fpath.write_text('a = {1: 2}') >>> assert _static_parse('a', fpath) == {1: 2} >>> fpath.write_text('a = 2') >>> assert _static_parse('a', fpath) == 2 >>> fpath.write_text('a = "3"') >>> assert _static_parse('a', fpath) == "3" >>> fpath.write_text('a = ["3", 5, 6]') >>> assert _static_parse('a', fpath) == ["3", 5, 6] >>> fpath.write_text('a = ("3", 5, 6)') >>> assert _static_parse('a', fpath) == ("3", 5, 6) >>> fpath.write_text('b = 10' + chr(10) + 'a = None') >>> assert _static_parse('a', fpath) is None >>> import pytest >>> with pytest.raises(TypeError): >>> fpath.write_text('a = list(range(10))') >>> assert _static_parse('a', fpath) is None >>> with pytest.raises(AttributeError): >>> fpath.write_text('a = list(range(10))') >>> assert _static_parse('c', fpath) is None >>> if sys.version_info[0:2] >= (3, 6): >>> # Test with type annotations >>> fpath.write_text('b: int = 10') >>> assert _static_parse('b', fpath) == 10 """ import ast if not exists(fpath): raise ValueError('fpath={!r} does not exist'.format(fpath)) with open(fpath, 'r') as file_: sourcecode = file_.read() pt = ast.parse(sourcecode) class StaticVisitor(ast.NodeVisitor): def visit_Assign(self, node): for target in node.targets: target_id = getattr(target, 'id', None) if target_id == varname: self.static_value = _parse_static_node_value(node.value) def visit_AnnAssign(self, node): target = node.target target_id = getattr(target, 'id', None) if target_id == varname: self.static_value = _parse_static_node_value(node.value) visitor = StaticVisitor() visitor.visit(pt) try: value = visitor.static_value except AttributeError: value = 'Unknown {}'.format(varname) raise AttributeError(value) return value def _platform_pylib_exts(): # nocover """ Returns .so, .pyd, or .dylib depending on linux, win or mac. On python3 return the previous with and without abi (e.g. .cpython-35m-x86_64-linux-gnu) flags. On python2 returns with and without multiarch. Returns: tuple """ import sysconfig valid_exts = [] # return with and without API flags # handle PEP 3149 -- ABI version tagged .so files base_ext = '.' + sysconfig.get_config_var('EXT_SUFFIX').split('.')[-1] for tag in _extension_module_tags(): valid_exts.append('.' + tag + base_ext) valid_exts.append(base_ext) return tuple(valid_exts) def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None): """ syspath version of modname_to_modpath Args: modname (str): name of module to find sys_path (None | List[str | PathLike]): The paths to search for the module. If unspecified, defaults to ``sys.path``. exclude (List[str | PathLike] | None): If specified prevents these directories from being searched. Defaults to None. Returns: str: path to the module. Note: This is much slower than the pkgutil mechanisms. There seems to be a change to the editable install mechanism: https://github.com/pypa/setuptools/issues/3548 Trying to find more docs about it. TODO: add a test where we make an editable install, regular install, standalone install, and check that we always find the right path. Example: >>> print(_syspath_modname_to_modpath('xdoctest.static_analysis')) ...static_analysis.py >>> print(_syspath_modname_to_modpath('xdoctest')) ...xdoctest >>> # xdoctest: +REQUIRES(CPython) >>> print(_syspath_modname_to_modpath('_ctypes')) ..._ctypes... >>> assert _syspath_modname_to_modpath('xdoctest', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('xdoctest.static_analysis', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('_ctypes', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('this', sys_path=[]) is None Example: >>> # test what happens when the module is not visible in the path >>> modname = 'xdoctest.static_analysis' >>> modpath = _syspath_modname_to_modpath(modname) >>> exclude = [split_modpath(modpath)[0]] >>> found = _syspath_modname_to_modpath(modname, exclude=exclude) >>> if found is not None: >>> # Note: the basic form of this test may fail if there are >>> # multiple versions of the package installed. Try and fix that. >>> other = split_modpath(found)[0] >>> assert other not in exclude >>> exclude.append(other) >>> found = _syspath_modname_to_modpath(modname, exclude=exclude) >>> if found is not None: >>> raise AssertionError( >>> 'should not have found {}.'.format(found) + >>> ' because we excluded: {}.'.format(exclude) + >>> ' cwd={} '.format(os.getcwd()) + >>> ' sys.path={} '.format(sys.path) >>> ) """ import glob def _isvalid(modpath, base): # every directory up to the module, should have an init subdir = dirname(modpath) while subdir and subdir != base: if not exists(join(subdir, '__init__.py')): return False subdir = dirname(subdir) return True _fname_we = modname.replace('.', os.path.sep) candidate_fnames = [ _fname_we + '.py', # _fname_we + '.pyc', # _fname_we + '.pyo', ] # Add extension library suffixes candidate_fnames += [_fname_we + ext for ext in _platform_pylib_exts()] if sys_path is None: sys_path = sys.path # the empty string in sys.path indicates cwd. Change this to a '.' candidate_dpaths = ['.' if p == '' else p for p in sys_path] if exclude: def normalize(p): if sys.platform.startswith('win32'): # nocover return realpath(p).lower() else: return realpath(p) # Keep only the paths not in exclude real_exclude = {normalize(p) for p in exclude} candidate_dpaths = [p for p in candidate_dpaths if normalize(p) not in real_exclude] def check_dpath(dpath): # Check for directory-based modules (has precedence over files) modpath = join(dpath, _fname_we) if exists(modpath): if isfile(join(modpath, '__init__.py')): if _isvalid(modpath, dpath): return modpath # If that fails, check for file-based modules for fname in candidate_fnames: modpath = join(dpath, fname) if isfile(modpath): if _isvalid(modpath, dpath): return modpath _pkg_name = _fname_we.split(os.path.sep)[0] _pkg_name_hypen = _pkg_name.replace('_', '-') _egglink_fname1 = _pkg_name + '.egg-link' _egglink_fname2 = _pkg_name_hypen + '.egg-link' # FIXME! suffixed modules will clobber break! # Currently mitigating this by looping over all possible matches, # but it would be nice to ensure we are not matching suffixes. # however, we should probably match and handle different versions. _editable_fname_pth_pat = '__editable__.' + _pkg_name + '-*.pth' # NOTE: the __editable__ finders are named after the package, but the # module could have a different name, so we cannot use the package name # (which in this case is really the module name) in the pattern, and we # have to check all of the finders. # _editable_fname_finder_py_pat = '__editable___' + _pkg_name + '_*finder.py' _editable_fname_finder_py_pat = '__editable___*_*finder.py' found_modpath = None for dpath in candidate_dpaths: modpath = check_dpath(dpath) if modpath: found_modpath = modpath break # Attempt to handle PEP660 import hooks. # We should look for a finder path first, because a pth might # not contain a real path, but code to load the finder. # Which one is used is defined in setuptools/editable_wheel.py # It will depend on an "Editable Strategy". # Basically a finder will be used for "complex" structures and # basic pth will be used for "simple" structures (which means has a # src/modname folder). new_editable_finder_paths = sorted(glob.glob(join(dpath, _editable_fname_finder_py_pat))) if new_editable_finder_paths: # nocover # This makes some assumptions, which may not hold in general # We may need to fallback entirely on pkgutil, which would # ultimately be good. Hopefully the new standards mean it does not # break with pytest anymore? Nope, pytest still doesn't work right # with it. for finder_fpath in new_editable_finder_paths: try: mapping = _static_parse('MAPPING', finder_fpath) except AttributeError: ... else: try: target = dirname(mapping[_pkg_name]) except KeyError: ... else: if not exclude or normalize(target) not in real_exclude: # pragma: nobranch modpath = check_dpath(target) if modpath: # pragma: nobranch found_modpath = modpath break if found_modpath is not None: break # If a finder does not exist, then the __editable__ pth file might hold # the path itself. Check for that. new_editable_pth_paths = sorted(glob.glob(join(dpath, _editable_fname_pth_pat))) if new_editable_pth_paths: # nocover # Disable coverage because the test that covers this is too slow. # It can be made faster, re-enable when that lands. import pathlib for editable_pth in new_editable_pth_paths: editable_pth = pathlib.Path(editable_pth) target = editable_pth.read_text().strip().split('\n')[-1] if not exclude or normalize(target) not in real_exclude: modpath = check_dpath(target) if modpath: # pragma: nobranch found_modpath = modpath break if found_modpath is not None: break # If file path checks fails, check for egg-link based modules # (Python usually puts egg links into sys.path, but if the user is # providing the path then it is important to check them explicitly) linkpath1 = join(dpath, _egglink_fname1) linkpath2 = join(dpath, _egglink_fname2) linkpath = None if isfile(linkpath1): # nocover linkpath = linkpath1 elif isfile(linkpath2): # nocover linkpath = linkpath2 if linkpath is not None: # nocover # We exclude this from coverage because its difficult to write a # unit test where we can enforce that there is a module installed # in development mode. # Note: the new test_editable_modules.py test can do this, but # this old method may no longer be supported. # TODO: ensure this is the correct way to parse egg-link files # https://setuptools.readthedocs.io/en/latest/formats.html#egg-links # The docs state there should only be one line, but I see two. with open(linkpath, 'r') as file: target = file.readline().strip() if not exclude or normalize(target) not in real_exclude: modpath = check_dpath(target) if modpath: found_modpath = modpath break return found_modpath def modname_to_modpath(modname, hide_init=True, hide_main=False, sys_path=None): """ Finds the path to a python module from its name. Determines the path to a python module without directly import it Converts the name of a module (__name__) to the path (__file__) where it is located without importing the module. Returns None if the module does not exist. Args: modname (str): The name of a module in ``sys_path``. hide_init (bool): if False, __init__.py will be returned for packages. Defaults to True. hide_main (bool): if False, and ``hide_init`` is True, __main__.py will be returned for packages, if it exists. Defaults to False. sys_path (None | List[str | PathLike]): The paths to search for the module. If unspecified, defaults to ``sys.path``. Returns: str | None: modpath - path to the module, or None if it doesn't exist Example: >>> modname = 'xdoctest.__main__' >>> modpath = modname_to_modpath(modname, hide_main=False) >>> assert modpath.endswith('__main__.py') >>> modname = 'xdoctest' >>> modpath = modname_to_modpath(modname, hide_init=False) >>> assert modpath.endswith('__init__.py') >>> # xdoctest: +REQUIRES(CPython) >>> modpath = basename(modname_to_modpath('_ctypes')) >>> assert 'ctypes' in modpath """ if hide_main or sys_path: modpath = _syspath_modname_to_modpath(modname, sys_path) else: # import xdev # with xdev.embed_on_exception_context: # try: # modpath = _importlib_modname_to_modpath(modname) # except Exception: # modpath = _syspath_modname_to_modpath(modname, sys_path) # modpath = _pkgutil_modname_to_modpath(modname, sys_path) modpath = _syspath_modname_to_modpath(modname, sys_path) if modpath is None: return None modpath = normalize_modpath(modpath, hide_init=hide_init, hide_main=hide_main) return modpath def normalize_modpath(modpath, hide_init=True, hide_main=False): """ Normalizes __init__ and __main__ paths. Args: modpath (str | PathLike): path to a module hide_init (bool): if True, always return package modules as __init__.py files otherwise always return the dpath. Defaults to True. hide_main (bool): if True, always strip away main files otherwise ignore __main__.py. Defaults to False. Returns: str | PathLike: a normalized path to the module Note: Adds __init__ if reasonable, but only removes __main__ by default Example: >>> from xdoctest import static_analysis as module >>> modpath = module.__file__ >>> assert normalize_modpath(modpath) == modpath.replace('.pyc', '.py') >>> dpath = dirname(modpath) >>> res0 = normalize_modpath(dpath, hide_init=0, hide_main=0) >>> res1 = normalize_modpath(dpath, hide_init=0, hide_main=1) >>> res2 = normalize_modpath(dpath, hide_init=1, hide_main=0) >>> res3 = normalize_modpath(dpath, hide_init=1, hide_main=1) >>> assert res0.endswith('__init__.py') >>> assert res1.endswith('__init__.py') >>> assert not res2.endswith('.py') >>> assert not res3.endswith('.py') """ if hide_init: if basename(modpath) == '__init__.py': modpath = dirname(modpath) hide_main = True else: # add in init, if reasonable modpath_with_init = join(modpath, '__init__.py') if exists(modpath_with_init): modpath = modpath_with_init if hide_main: # We can remove main, but dont add it if basename(modpath) == '__main__.py': # corner case where main might just be a module name not in a pkg parallel_init = join(dirname(modpath), '__init__.py') if exists(parallel_init): modpath = dirname(modpath) return modpath def modpath_to_modname(modpath, hide_init=True, hide_main=False, check=True, relativeto=None): """ Determines importable name from file path Converts the path to a module (__file__) to the importable python name (__name__) without importing the module. The filename is converted to a module name, and parent directories are recursively included until a directory without an __init__.py file is encountered. Args: modpath (str): Module filepath hide_init (bool): Removes the __init__ suffix. Defaults to True. hide_main (bool): Removes the __main__ suffix. Defaults to False. check (bool): If False, does not raise an error if modpath is a dir and does not contain an __init__ file. Defaults to True. relativeto (str | None): If specified, all checks are ignored and this is considered the path to the root module. Defaults to None. TODO: - [ ] Does this need modification to support PEP 420? https://www.python.org/dev/peps/pep-0420/ Returns: str: modname Raises: ValueError: if check is True and the path does not exist Example: >>> from xdoctest import static_analysis >>> modpath = static_analysis.__file__.replace('.pyc', '.py') >>> modpath = modpath.replace('.pyc', '.py') >>> modname = modpath_to_modname(modpath) >>> assert modname == 'xdoctest.static_analysis' Example: >>> import xdoctest >>> assert modpath_to_modname(xdoctest.__file__.replace('.pyc', '.py')) == 'xdoctest' >>> assert modpath_to_modname(dirname(xdoctest.__file__.replace('.pyc', '.py'))) == 'xdoctest' Example: >>> # xdoctest: +REQUIRES(CPython) >>> modpath = modname_to_modpath('_ctypes') >>> modname = modpath_to_modname(modpath) >>> assert modname == '_ctypes' Example: >>> modpath = '/foo/libfoobar.linux-x86_64-3.6.so' >>> modname = modpath_to_modname(modpath, check=False) >>> assert modname == 'libfoobar' """ if check and relativeto is None: if not exists(modpath): raise ValueError('modpath={} does not exist'.format(modpath)) modpath_ = abspath(expanduser(modpath)) modpath_ = normalize_modpath(modpath_, hide_init=hide_init, hide_main=hide_main) if relativeto: dpath = dirname(abspath(expanduser(relativeto))) rel_modpath = relpath(modpath_, dpath) else: dpath, rel_modpath = split_modpath(modpath_, check=check) modname = splitext(rel_modpath)[0] if '.' in modname: modname, abi_tag = modname.split('.', 1) modname = modname.replace('/', '.') modname = modname.replace('\\', '.') return modname def split_modpath(modpath, check=True): """ Splits the modpath into the dir that must be in PYTHONPATH for the module to be imported and the modulepath relative to this directory. Args: modpath (str): module filepath check (bool): if False, does not raise an error if modpath is a directory and does not contain an ``__init__.py`` file. Returns: Tuple[str, str]: (directory, rel_modpath) Raises: ValueError: if modpath does not exist or is not a package Example: >>> from xdoctest import static_analysis >>> modpath = static_analysis.__file__.replace('.pyc', '.py') >>> modpath = abspath(modpath) >>> dpath, rel_modpath = split_modpath(modpath) >>> recon = join(dpath, rel_modpath) >>> assert recon == modpath >>> assert rel_modpath == join('xdoctest', 'static_analysis.py') """ modpath_ = abspath(expanduser(modpath)) if check: if not exists(modpath_): if not exists(modpath): raise ValueError('modpath={} does not exist'.format(modpath)) raise ValueError('modpath={} is not a module'.format(modpath)) if isdir(modpath_) and not exists(join(modpath, '__init__.py')): # dirs without inits are not modules raise ValueError('modpath={} is not a module'.format(modpath)) full_dpath, fname_ext = split(modpath_) _relmod_parts = [fname_ext] # Recurse down directories until we are out of the package dpath = full_dpath while exists(join(dpath, '__init__.py')): dpath, dname = split(dpath) _relmod_parts.append(dname) relmod_parts = _relmod_parts[::-1] rel_modpath = os.path.sep.join(relmod_parts) return dpath, rel_modpath Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_import.pyi000066400000000000000000000026341505122333300244120ustar00rootroot00000000000000from os import PathLike from types import ModuleType from typing import List from typing import Tuple from _typeshed import Incomplete def is_modname_importable(modname: str, sys_path: list | None = None, exclude: list | None = None) -> bool: ... class PythonPathContext: dpath: Incomplete index: Incomplete def __init__(self, dpath, index: int = ...) -> None: ... def __enter__(self) -> None: ... def __exit__(self, type, value, trace) -> None: ... def import_module_from_path(modpath: str | PathLike, index: int = ...) -> ModuleType: ... def import_module_from_name(modname: str) -> ModuleType: ... def modname_to_modpath( modname: str, hide_init: bool = True, hide_main: bool = False, sys_path: None | List[str | PathLike] = None) -> str | None: ... def normalize_modpath(modpath: str | PathLike, hide_init: bool = True, hide_main: bool = False) -> str | PathLike: ... def modpath_to_modname(modpath: str, hide_init: bool = True, hide_main: bool = False, check: bool = True, relativeto: str | None = None) -> str: ... def split_modpath(modpath: str, check: bool = True) -> Tuple[str, str]: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_misc.py000066400000000000000000000077201505122333300236630ustar00rootroot00000000000000""" Utilities that are mainly used in self-testing """ from os.path import join import random from .util_path import TempDir class TempDoctest: """ Creates a temporary file containing a module-level doctest for testing Example: >>> from xdoctest import core >>> self = TempDoctest('>>> a = 1') >>> doctests = list(core.parse_doctestables(self.modpath)) >>> assert len(doctests) == 1 """ def __init__(self, docstr, modname=None): if modname is None: # make a random temporary module name alphabet = list(map(chr, range(97, 97 + 26))) modname = ''.join([random.choice(alphabet) for _ in range(8)]) self.modname = modname self.docstr = docstr self.temp = TempDir() self.dpath = self.temp.ensure() self.modpath = join(self.dpath, self.modname + '.py') with open(self.modpath, 'w') as file: file.write("'''\n%s'''" % self.docstr) class TempModule: """ Creates a temporary directory with a python module. Example: >>> from xdoctest import core >>> self = TempDoctest('>>> a = 1') >>> doctests = list(core.parse_doctestables(self.modpath)) >>> assert len(doctests) == 1 """ def __init__(self, module_text, modname=None): if modname is None: # make a random temporary module name alphabet = list(map(chr, range(97, 97 + 26))) modname = ''.join([random.choice(alphabet) for _ in range(8)]) self.modname = modname self.module_text = module_text self.temp = TempDir() self.dpath = self.temp.ensure() self.modpath = join(self.dpath, self.modname + '.py') with open(self.modpath, 'w') as file: file.write(module_text) def print_contents(self): """ For debugging on windows """ import pathlib import os print(f'--- ---') print(f'self.modname={self.modname!r}') print(f'self.dpath={self.dpath!r}') print(f'self.modpath={self.modpath!r}') dpath_exists1 = os.path.exists(self.dpath) modpath_exists1 = os.path.exists(self.modpath) print(f'dpath_exists1={dpath_exists1}') print(f'modpath_exists1={modpath_exists1}') dpath = pathlib.Path(self.dpath) print(f'dpath={dpath}') dpath_exists = dpath.exists() print(f'dpath_exists={dpath_exists!r}') modpath = pathlib.Path(self.modpath) print(f'modpath={modpath!r}') modpath_exists = modpath.exists() print(f'modpath_exists={modpath_exists!r}') modpath_contents = modpath.read_text() print(f'modpath_contents={modpath_contents!r}') print(f'--- ---') def _run_case(source, style='auto'): """ Runs all doctests in a source block Args: source (str): source code of an entire file TODO: run case is over-duplicated and should be separated into a test utils directory """ from xdoctest import utils from xdoctest import runner COLOR = 'yellow' def cprint(msg, color=COLOR): print(utils.color_text(str(msg), COLOR)) cprint('\n\n' '\n ' '\n ======== ' '\n', COLOR) cprint('CASE SOURCE:') cprint('------------') print(utils.indent( utils.add_line_numbers(utils.highlight_code(source, 'python')))) print('') import hashlib hasher = hashlib.sha1() hasher.update(source.encode('utf8')) hashid = hasher.hexdigest()[0:8] with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_linenos_' + hashid + '.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout(suppress=False) as cap: runner.doctest_module(modpath, 'all', argv=[''], style=style) cprint('\n\n --- --- \n\n', COLOR) return cap.text Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_misc.pyi000066400000000000000000000007461505122333300240350ustar00rootroot00000000000000from _typeshed import Incomplete class TempDoctest: modname: Incomplete docstr: Incomplete temp: Incomplete dpath: Incomplete modpath: Incomplete def __init__(self, docstr, modname: Incomplete | None = ...) -> None: ... class TempModule: modname: Incomplete module_text: Incomplete temp: Incomplete dpath: Incomplete modpath: Incomplete def __init__(self, module_text, modname: Incomplete | None = ...) -> None: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_mixins.py000066400000000000000000000031511505122333300242310ustar00rootroot00000000000000""" Port of NiceRepr from ubelt.util_mixins """ class NiceRepr: """ Defines `__str__` and `__repr__` in terms of `__nice__` function Classes that inherit `NiceRepr` must define `__nice__` Example: >>> class Foo(NiceRepr): ... pass >>> class Bar(NiceRepr): ... def __nice__(self): ... return 'info' >>> foo = Foo() >>> bar = Bar() >>> assert str(bar) == '' >>> assert repr(bar).startswith('>> assert 'object at' in str(foo) >>> assert 'object at' in repr(foo) """ def __repr__(self): try: classname = self.__class__.__name__ devnice = self.__nice__() return '<%s(%s) at %s>' % (classname, devnice, hex(id(self))) except AttributeError: if hasattr(self, '__nice__'): raise # warnings.warn('Define the __nice__ method for %r' % # (self.__class__,), category=RuntimeWarning) return object.__repr__(self) #return super(NiceRepr, self).__repr__() def __str__(self): try: classname = self.__class__.__name__ devnice = self.__nice__() return '<%s(%s)>' % (classname, devnice) except AttributeError: if hasattr(self, '__nice__'): raise # warnings.warn('Define the __nice__ method for %r' % # (self.__class__,), category=RuntimeWarning) return object.__str__(self) #return super(NiceRepr, self).__str__() Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_mixins.pyi000066400000000000000000000000301505122333300243730ustar00rootroot00000000000000class NiceRepr: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_notebook.py000066400000000000000000000247121505122333300245500ustar00rootroot00000000000000""" Utilities for handling Jupyter / IPython notebooks This code is copied and modified from nbimporter (https://github.com/grst/nbimporter/blob/master/nbimporter.py) which is not actively maintained (otherwise we would use it as a dependency). Note that using this behavior is very much discouraged, it would be far better if you maintained your reusable code in separate python modules. See https://github.com/grst/nbimporter for reasons. ---- Allow for importing of IPython Notebooks as modules from Jupyter v4. Updated from module collated here: https://github.com/adrn/ipython/blob/master/examples/Notebook/Importing%20Notebooks.ipynb Importing from a notebook is different from a module: because one typically keeps many computations and tests besides exportable defs, here we only run code which either defines a function or a class, or imports code from other modules and notebooks. This behaviour can be disabled by setting NotebookLoader.default_options['only_defs'] = False. Furthermore, in order to provide per-notebook initialisation, if a special function __nbinit__() is defined in the notebook, it will be executed the first time an import statement is. This behaviour can be disabled by setting NotebookLoader.default_options['run_nbinit'] = False. Finally, you can set the encoding of the notebooks with NotebookLoader.default_options['encoding']. The default is 'utf-8'. """ import io import os import sys import types import ast from os.path import basename, dirname def _find_notebook(fullname, path=None): """ Find a notebook, given its fully qualified name and an optional path This turns "foo.bar" into "foo/bar.ipynb" and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar does not exist. Example: >>> # xdoctest: +REQUIRES(PY3, module:IPython, module:nbconvert) >>> from xdoctest.utils.util_notebook import _find_notebook >>> from xdoctest import utils >>> from os.path import join, basename, splitext >>> self = utils.TempDir() >>> dpath = self.ensure() >>> fpath = join(dpath, 'test_import_notebook.ipynb') >>> cells = ['x = 1'] >>> _make_test_notebook_fpath(fpath, cells) >>> fullname = splitext(basename(fpath))[0] >>> path = [dpath] >>> _find_notebook(fullname, path) ...test_import_notebook.ipynb >>> _find_notebook(fullname, None) None """ name = fullname.rsplit('.', 1)[-1] if not path: path = [''] for d in path: nb_path = os.path.join(d, name + ".ipynb") if os.path.isfile(nb_path): return nb_path # let import Notebook_Name find "Notebook Name.ipynb" nb_path = nb_path.replace("_", " ") if os.path.isfile(nb_path): return nb_path class CellDeleter(ast.NodeTransformer): """ Removes all nodes from an AST which are not suitable for exporting out of a notebook. """ def visit(self, node): """ Visit a node. """ if node.__class__.__name__ in ['Module', 'FunctionDef', 'ClassDef', 'Import', 'ImportFrom']: return node return None class NotebookLoader: """ Module Loader for Jupyter Notebooks. """ default_options = { 'only_defs': False, 'run_nbinit': True, 'encoding': 'utf-8' } def __init__(self, path=None): from IPython.core.interactiveshell import InteractiveShell self.shell = InteractiveShell.instance() self.path = path self.options = self.default_options.copy() def load_module(self, fullname=None, fpath=None): """import a notebook as a module""" from IPython import get_ipython import nbformat if fpath is None: fpath = _find_notebook(fullname, self.path) # load the notebook object nb_version = nbformat.current_nbformat with io.open(fpath, 'r', encoding=self.options['encoding']) as f: nb = nbformat.read(f, nb_version) # create the module and add it to sys.modules # if name in sys.modules: # return sys.modules[name] mod = types.ModuleType(fullname) mod.__file__ = fpath mod.__loader__ = self mod.__dict__['get_ipython'] = get_ipython # Only do something if it's a python notebook # if nb.metadata.kernelspec.language != 'python': # print("Ignoring '%s': not a python notebook." % fpath) # return mod # print("Importing Jupyter notebook from %s" % fpath) sys.modules[fullname] = mod # extra work to ensure that magics that would affect the user_ns # actually affect the notebook module's ns save_user_ns = self.shell.user_ns self.shell.user_ns = mod.__dict__ try: deleter = CellDeleter() for cell in filter(lambda c: c.cell_type == 'code', nb.cells): # transform the input into executable Python code = self.shell.input_transformer_manager.transform_cell(cell.source) if self.options['only_defs']: # Remove anything that isn't a def or a class tree = deleter.generic_visit(ast.parse(code)) else: tree = ast.parse(code) # run the code in the module codeobj = compile(tree, filename=fpath, mode='exec') exec(codeobj, mod.__dict__) finally: self.shell.user_ns = save_user_ns # Run any initialisation if available, but only once if self.options['run_nbinit'] and '__nbinit_done__' not in mod.__dict__: try: mod.__nbinit__() mod.__nbinit_done__ = True except (KeyError, AttributeError): pass return mod def import_notebook_from_path(ipynb_fpath, only_defs=False): """ Import an IPython notebook as a module from a full path and try to maintain clean sys.path variables. Args: ipynb_fpath (str | PathLike): path to the ipython notebook file to import only_defs (bool, default=False): if True ignores all non-definition statements Example: >>> # xdoctest: +REQUIRES(PY3, module:IPython, module:nbconvert) >>> from xdoctest import utils >>> from os.path import join >>> self = utils.TempDir() >>> dpath = self.ensure() >>> ipynb_fpath = join(dpath, 'test_import_notebook.ipydb') >>> cells = [ >>> utils.codeblock( >>> ''' >>> def foo(): >>> return 'bar' >>> '''), >>> utils.codeblock( >>> ''' >>> x = 1 >>> ''') >>> ] >>> _make_test_notebook_fpath(ipynb_fpath, cells) >>> module = import_notebook_from_path(ipynb_fpath) >>> assert module.foo() == 'bar' >>> assert module.x == 1 """ ipynb_fname = basename(ipynb_fpath) fname_noext = ipynb_fname.rsplit('.', 1)[0] ipynb_modname = fname_noext.replace(' ', '_') # hack around the importlib machinery loader = NotebookLoader() loader.options['only_defs'] = only_defs module = loader.load_module(ipynb_modname, ipynb_fpath) return module def execute_notebook(ipynb_fpath, timeout=None, verbose=None): """ Execute an IPython notebook in a separate kernel Args: ipynb_fpath (str | PathLike): path to the ipython notebook file to import Returns: nbformat.notebooknode.NotebookNode : nb The executed notebook. dict: resources Additional resources used in the conversion process. Example: >>> # xdoctest: +REQUIRES(PY3, module:IPython, module:nbconvert, CPYTHON) >>> from xdoctest import utils >>> from os.path import join >>> self = utils.TempDir() >>> dpath = self.ensure() >>> ipynb_fpath = join(dpath, 'hello_world.ipydb') >>> _make_test_notebook_fpath(ipynb_fpath, [utils.codeblock( >>> ''' >>> print('hello world') >>> ''')]) >>> nb, resources = execute_notebook(ipynb_fpath, verbose=3) >>> print('resources = {!r}'.format(resources)) >>> print('nb = {!r}'.format(nb)) >>> for cell in nb['cells']: >>> if len(cell['outputs']) != 1: >>> import warnings >>> warnings.warn('expected an output, is this the issue ' >>> 'described [here](https://github.com/nteract/papermill/issues/426)?') """ import nbformat import logging from nbconvert.preprocessors import ExecutePreprocessor dpath = dirname(ipynb_fpath) ep = ExecutePreprocessor(timeout=timeout) if verbose is None: verbose = 0 if verbose > 1: print('executing notebook in dpath = {!r}'.format(dpath)) ep.log.setLevel(logging.DEBUG) elif verbose > 0: ep.log.setLevel(logging.INFO) with open(ipynb_fpath, 'r+') as file: nb = nbformat.read(file, as_version=nbformat.NO_CONVERT) nb, resources = ep.preprocess(nb, {'metadata': {'path': dpath}}) # from nbconvert.preprocessors import executenb # nb, resources = executenb(nb, cwd=dpath) return nb, resources def _make_test_notebook_fpath(fpath, cell_sources): """ Helper for testing Args: fpath (str): file to write notebook to cell_sources (List[str]): list of python code blocks References: https://stackoverflow.com/questions/38193878/create-notebook-from-code https://gist.github.com/fperez/9716279 """ import nbformat as nbf import json import jupyter_client.kernelspec # TODO: is there an API to generate kernelspec json correctly? kernel_name = jupyter_client.kernelspec.NATIVE_KERNEL_NAME spec = jupyter_client.kernelspec.get_kernel_spec(kernel_name) metadata = {'kernelspec': { 'name': kernel_name, 'display_name': spec.display_name, 'language': spec.language, }} # Use nbformat API to create notebook structure and cell json nb = nbf.v4.new_notebook(metadata=metadata) for source in cell_sources: nb['cells'].append(nbf.v4.new_code_cell(source)) with open(fpath, 'w') as file: json.dump(nb, file) return fpath if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/xdoctest/utils/util_notebook.py all """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_notebook.pyi000066400000000000000000000014401505122333300247120ustar00rootroot00000000000000from os import PathLike import ast from _typeshed import Incomplete class CellDeleter(ast.NodeTransformer): def visit(self, node): ... class NotebookLoader: default_options: Incomplete shell: Incomplete path: Incomplete options: Incomplete def __init__(self, path: Incomplete | None = ...) -> None: ... def load_module(self, fullname: Incomplete | None = ..., fpath: Incomplete | None = ...): ... def import_notebook_from_path(ipynb_fpath: str | PathLike, only_defs: bool = False): ... def execute_notebook(ipynb_fpath: str | PathLike, timeout: Incomplete | None = ..., verbose: Incomplete | None = ...) -> dict: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_path.py000066400000000000000000000044721505122333300236650ustar00rootroot00000000000000""" Utilities related to filesystem paths """ import os from os.path import exists from os.path import join from os.path import normpath import shutil class TempDir: """ Context for creating and cleaning up temporary files. Used in testing. Example: >>> with TempDir() as self: >>> dpath = self.dpath >>> assert exists(dpath) >>> assert not exists(dpath) Example: >>> self = TempDir() >>> dpath = self.ensure() >>> assert exists(dpath) >>> self.cleanup() >>> assert not exists(dpath) """ def __init__(self, persist=False): self.dpath = None self.persist = persist def __del__(self): self.cleanup() def ensure(self): import tempfile import sys if not self.dpath: dpath = tempfile.mkdtemp() if sys.platform.startswith('win32'): # Force a long path # References: # https://stackoverflow.com/questions/11420689/how-to-get-long-file-system-path-from-python-on-windows from ctypes import create_unicode_buffer, windll BUFFER_SIZE = 500 buffer = create_unicode_buffer(BUFFER_SIZE) get_long_path_name = windll.kernel32.GetLongPathNameW get_long_path_name(dpath, buffer, BUFFER_SIZE) dpath = buffer.value self.dpath = dpath return self.dpath def cleanup(self): if not self.persist: if self.dpath: shutil.rmtree(self.dpath) self.dpath = None def __enter__(self): self.ensure() return self def __exit__(self, type_, value, trace): self.cleanup() def ensuredir(dpath, mode=0o1777): """ Ensures that directory will exist. creates new dir with sticky bits by default Args: dpath (str): dir to ensure. Can also be a tuple to send to join mode (int): octal mode of directory (default 0o1777) Returns: str: path - the ensured directory """ if isinstance(dpath, (list, tuple)): # nocover dpath = join(*dpath) if not exists(dpath): try: os.makedirs(normpath(dpath), mode=mode) except OSError: # nocover raise return dpath Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_path.pyi000066400000000000000000000007031505122333300240270ustar00rootroot00000000000000from _typeshed import Incomplete class TempDir: dpath: Incomplete persist: Incomplete def __init__(self, persist: bool = ...) -> None: ... def __del__(self) -> None: ... def ensure(self): ... def cleanup(self) -> None: ... def __enter__(self): ... def __exit__(self, type_, value, trace) -> None: ... def ensuredir(dpath: str, mode: int = 1023) -> str: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_str.py000066400000000000000000000241021505122333300235310ustar00rootroot00000000000000""" Utilities related to string manipulations """ import math import textwrap import warnings import re import os import sys # Global state that determines if ANSI-coloring text is allowed # (which is mainly to address non-ANSI compliant windows consoles) # compliant with https://no-color.org/ NO_COLOR = bool(os.environ.get('NO_COLOR')) def strip_ansi(text): r""" Removes all ansi directives from the string. Args: text (str): Returns: str References: http://stackoverflow.com/questions/14693701/remove-ansi https://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences Examples: >>> line = '\t\u001b[0;35mBlabla\u001b[0m \u001b[0;36m172.18.0.2\u001b[0m' >>> escaped_line = strip_ansi(line) >>> assert escaped_line == '\tBlabla 172.18.0.2' """ # ansi_escape1 = re.compile(r'\x1b[^m]*m') # text = ansi_escape1.sub('', text) # ansi_escape2 = re.compile(r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?') ansi_escape3 = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]', flags=re.IGNORECASE) text = ansi_escape3.sub('', text) return text def color_text(text, color): r""" Colorizes text a single color using ansii tags. Args: text (str): text to colorize color (str): may be one of the following: yellow, blink, lightgray, underline, darkyellow, blue, darkblue, faint, fuchsia, black, white, red, brown, turquoise, bold, darkred, darkgreen, reset, standout, darkteal, darkgray, overline, purple, green, teal, fuscia Returns: str: colorized text. If pygments is not installed plain text is returned. Example: >>> import sys >>> if sys.platform.startswith('win32'): >>> import pytest >>> pytest.skip() >>> text = 'raw text' >>> from xdoctest import utils >>> from xdoctest.utils import util_str >>> if utils.modname_to_modpath('pygments') and not util_str.NO_COLOR: >>> # Colors text only if pygments is installed >>> import pygments >>> print('pygments = {!r}'.format(pygments)) >>> ansi_text1 = color_text(text, 'red') >>> print('ansi_text1 = {!r}'.format(ansi_text1)) >>> ansi_text = utils.ensure_unicode(ansi_text1) >>> prefix = utils.ensure_unicode('\x1b[31') >>> print('prefix = {!r}'.format(prefix)) >>> print('ansi_text = {!r}'.format(ansi_text)) >>> assert ansi_text.startswith(prefix) >>> assert color_text(text, None) == 'raw text' >>> else: >>> # Otherwise text passes through unchanged >>> assert color_text(text, 'red') == 'raw text' >>> assert color_text(text, None) == 'raw text' """ if NO_COLOR or color is None: return text try: if sys.platform.startswith('win32'): # nocover # Hack on win32 to support colored output try: import colorama if not colorama.initialise.atexit_done: # Only init if it hasn't been done colorama.init() except ImportError: warnings.warn( 'colorama is not installed, ansi colors may not work') # import os # if os.environ.get('XDOC_WIN32_COLORS', 'False') == 'False': # # hack: dont color on windows by default, but do init colorama # return text import pygments import pygments.console try: ansi_text = pygments.console.colorize(color, text) except KeyError: warnings.warn('unable to find color: {!r}'.format(color)) return text except Exception as ex: # nocover warnings.warn('some other issue with text color: {!r}'.format(ex)) return text return ansi_text except ImportError: # nocover warnings.warn('pygments is not installed, text will not be colored') return text def ensure_unicode(text): """ Casts bytes into utf8 (mostly for python2 compatibility) Args: text (str): Returns: str References: http://stackoverflow.com/questions/12561063/python-extract-data-from-file CommandLine: python -m xdoctest.utils ensure_unicode Example: >>> assert ensure_unicode('my ünicôdé strįng') == 'my ünicôdé strįng' >>> assert ensure_unicode('text1') == 'text1' >>> assert ensure_unicode('text1'.encode('utf8')) == 'text1' >>> assert ensure_unicode('text1'.encode('utf8')) == 'text1' >>> import codecs >>> assert (codecs.BOM_UTF8 + 'text»¿'.encode('utf8')).decode('utf8') """ if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode('utf8') else: # nocover raise ValueError('unknown input type {!r}'.format(text)) def indent(text, prefix=' '): r""" Indents a block of text Args: text (str): text to indent prefix (str): prefix to add to each line (default = ' ') Returns: str: indented text CommandLine: python -m xdoctest.utils ensure_unicode Example: >>> text = 'Lorem ipsum\ndolor sit amet' >>> prefix = ' ' >>> result = indent(text, prefix) >>> assert all(t.startswith(prefix) for t in result.split('\n')) """ return prefix + text.replace('\n', '\n' + prefix) def highlight_code(text, lexer_name='python', **kwargs): """ Highlights a block of text using ansi tags based on language syntax. Args: text (str): plain text to highlight lexer_name (str): name of language **kwargs: passed to pygments.lexers.get_lexer_by_name Returns: str: text : highlighted text If pygments is not installed, the plain text is returned. CommandLine: python -c "import pygments.formatters; print(list(pygments.formatters.get_all_formatters()))" Example: >>> text = 'import xdoctest as xdoc; print(xdoc)' >>> new_text = highlight_code(text) >>> print(new_text) """ if NO_COLOR: return text # Resolve extensions to languages lexer_name = { 'py': 'python', 'h': 'cpp', 'cpp': 'cpp', 'cxx': 'cpp', 'c': 'cpp', }.get(lexer_name.replace('.', ''), lexer_name) try: if sys.platform.startswith('win32'): # nocover # Hack on win32 to support colored output try: import colorama if not colorama.initialise.atexit_done: # Only init if it hasn't been done colorama.init() except ImportError: warnings.warn( 'colorama is not installed, ansi colors may not work') # import os # if os.environ.get('XDOC_WIN32_COLORS', 'False') == 'False': # # hack: dont color on windows by default, but do init colorama # return text import pygments import pygments.lexers import pygments.formatters import pygments.formatters.terminal formatter = pygments.formatters.terminal.TerminalFormatter(bg='dark') lexer = pygments.lexers.get_lexer_by_name(lexer_name, ensurenl=False, **kwargs) new_text = pygments.highlight(text, lexer, formatter) # formatter = pygments.formatters.terminal.TerminalFormatter(bg='dark') # lexer = pygments.lexers.get_lexer_by_name(lexer_name, **kwargs) # new_text = pygments.highlight(text, lexer, formatter) except ImportError: # nocover warnings.warn('pygments is not installed, code will not be highlighted') new_text = text return new_text def add_line_numbers(source, start=1, n_digits=None): """ Prefixes code with line numbers Args: source (str | List[str]): start (int): n_digits (int | None): Returns: List[str] | str Example: >>> print(chr(10).join(add_line_numbers(['a', 'b', 'c']))) 1 a 2 b 3 c >>> print(add_line_numbers(chr(10).join(['a', 'b', 'c']))) 1 a 2 b 3 c """ was_string = isinstance(source, str) part_lines = source.splitlines() if was_string else source if n_digits is None: endline = start + len(part_lines) n_digits = math.log(max(1, endline), 10) n_digits = int(math.ceil(n_digits)) src_fmt = '{count:{n_digits}d} {line}' part_lines = [ src_fmt.format(n_digits=n_digits, count=count, line=line) for count, line in enumerate(part_lines, start=start) ] if was_string: return '\n'.join(part_lines) else: return part_lines def codeblock(block_str): """ Wraps multiline string blocks and returns unindented code. Useful for templated code defined in indented parts of code. Args: block_str (str): typically in the form of a multiline string Returns: str: the unindented string Example: >>> # Simulate an indented part of code >>> if True: ... # notice the indentation on this will be normal ... codeblock_version = codeblock( ... ''' ... def foo(): ... return 'bar' ... ''' ... ) ... # notice the indentation and newlines on this will be odd ... normal_version = (''' ... def foo(): ... return 'bar' ... ''') >>> assert normal_version != codeblock_version >>> print('Without codeblock') >>> print(normal_version) >>> print('With codeblock') >>> print(codeblock_version) """ return textwrap.dedent(block_str).strip('\n') if __name__ == '__main__': """ CommandLine: python -m xdoctest.utils.util_str all """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_str.pyi000066400000000000000000000011121505122333300236760ustar00rootroot00000000000000from typing import List from _typeshed import Incomplete NO_COLOR: Incomplete def strip_ansi(text: str) -> str: ... def color_text(text: str, color: str) -> str: ... def ensure_unicode(text: str) -> str: ... def indent(text: str, prefix: str = ' ') -> str: ... def highlight_code(text: str, lexer_name: str = 'python', **kwargs) -> str: ... def add_line_numbers(source: str | List[str], start: int = 1, n_digits: int | None = None) -> List[str] | str: ... def codeblock(block_str: str) -> str: ... Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_stream.py000066400000000000000000000145651505122333300242300ustar00rootroot00000000000000""" Functions for capturing and redirecting IO streams. The :class:`CaptureStdout` captures all text sent to stdout and optionally prevents it from actually reaching stdout. The :class:`TeeStringIO` does the same thing but for arbitrary streams. It is how the former is implemented. """ import sys import io class TeeStringIO(io.StringIO): """ An IO object that writes to itself and another IO stream. Attributes: redirect (io.IOBase): The other stream to write to. Example: >>> redirect = io.StringIO() >>> self = TeeStringIO(redirect) """ def __init__(self, redirect=None): self.redirect = redirect # type: io.IOBase super(TeeStringIO, self).__init__() # Logic taken from prompt_toolkit/output/vt100.py version 3.0.5 in # flush I don't have a full understanding of what the buffer # attribute is supposed to be capturing here, but this seems to # allow us to embed in IPython while still capturing and Teeing # stdout if hasattr(redirect, 'buffer'): self.buffer = redirect.buffer # Py3. else: self.buffer = redirect def isatty(self): # nocover """ Returns true of the redirect is a terminal. Note: Needed for IPython.embed to work properly when this class is used to override stdout / stderr. """ return (self.redirect is not None and hasattr(self.redirect, 'isatty') and self.redirect.isatty()) def fileno(self): """ Returns underlying file descriptor of the redirected IOBase object if one exists. """ if self.redirect is not None: return self.redirect.fileno() else: return super(TeeStringIO, self).fileno() @property def encoding(self): """ Gets the encoding of the `redirect` IO object Example: >>> redirect = io.StringIO() >>> assert TeeStringIO(redirect).encoding is None >>> assert TeeStringIO(None).encoding is None >>> assert TeeStringIO(sys.stdout).encoding is sys.stdout.encoding >>> redirect = io.TextIOWrapper(io.StringIO()) >>> assert TeeStringIO(redirect).encoding is redirect.encoding """ if self.redirect is not None: return self.redirect.encoding else: return super(TeeStringIO, self).encoding def write(self, msg): """ Write to this and the redirected stream """ if self.redirect is not None: self.redirect.write(msg) return super(TeeStringIO, self).write(msg) def flush(self): # nocover """ Flush to this and the redirected stream """ if self.redirect is not None: self.redirect.flush() return super(TeeStringIO, self).flush() class CaptureStream: """ Generic class for capturing streaming output from stdout or stderr """ class CaptureStdout(CaptureStream): r""" Context manager that captures stdout and stores it in an internal stream Args: suppress (bool, default=True): if True, stdout is not printed while captured enabled (bool, default=True): does nothing if this is False Example: >>> self = CaptureStdout(suppress=True) >>> print('dont capture the table flip (╯°□°)╯︵ ┻━┻') >>> with self: ... text = 'capture the heart ♥' ... print(text) >>> print('dont capture look of disapproval ಠ_ಠ') >>> assert isinstance(self.text, str) >>> assert self.text == text + '\n', 'failed capture text' Example: >>> self = CaptureStdout(suppress=False) >>> with self: ... print('I am captured and printed in stdout') >>> assert self.text.strip() == 'I am captured and printed in stdout' Example: >>> self = CaptureStdout(suppress=True, enabled=False) >>> with self: ... print('dont capture') >>> assert self.text is None """ def __init__(self, suppress=True, enabled=True, **kwargs): _misspelled_varname = 'supress' if _misspelled_varname in kwargs: # nocover from xdoctest.utils import util_deprecation util_deprecation.schedule_deprecation( modname='xdoctest', name='supress', type='Argument of CaptureStdout', migration='Use suppress instead', deprecate='1.0.0', error='1.1.0', remove='1.2.0' ) suppress = kwargs.pop(_misspelled_varname) if len(kwargs) > 0: raise ValueError('unexpected args: {}'.format(kwargs)) self.enabled = enabled self.suppress = suppress self.orig_stdout = sys.stdout if suppress: redirect = None else: redirect = self.orig_stdout self.cap_stdout = TeeStringIO(redirect) self.text = None self._pos = 0 # keep track of how much has been logged self.parts = [] self.started = False def log_part(self): """ Log what has been captured so far """ self.cap_stdout.seek(self._pos) text = self.cap_stdout.read() self._pos = self.cap_stdout.tell() self.parts.append(text) self.text = text def start(self): if self.enabled: self.text = '' self.started = True sys.stdout = self.cap_stdout def stop(self): """ Example: >>> CaptureStdout(enabled=False).stop() >>> CaptureStdout(enabled=True).stop() """ if self.enabled: self.started = False sys.stdout = self.orig_stdout def __enter__(self): self.start() return self def __del__(self): # nocover if self.started: self.stop() if self.cap_stdout is not None: self.close() def close(self): self.cap_stdout.close() self.cap_stdout = None def __exit__(self, type_, value, trace): if self.enabled: try: self.log_part() except Exception: # nocover raise finally: self.stop() if trace is not None: return False # return a falsey value on error Erotemic-xdoctest-fac8308/src/xdoctest/utils/util_stream.pyi000066400000000000000000000021031505122333300243620ustar00rootroot00000000000000import io from _typeshed import Incomplete class TeeStringIO(io.StringIO): redirect: io.IOBase buffer: Incomplete def __init__(self, redirect: Incomplete | None = ...) -> None: ... def isatty(self): ... def fileno(self): ... @property def encoding(self): ... def write(self, msg): ... def flush(self): ... class CaptureStream: ... class CaptureStdout(CaptureStream): enabled: Incomplete suppress: Incomplete orig_stdout: Incomplete cap_stdout: Incomplete text: Incomplete parts: Incomplete started: bool def __init__(self, suppress: bool = ..., enabled: bool = ..., **kwargs) -> None: ... def log_part(self) -> None: ... def start(self) -> None: ... def stop(self) -> None: ... def __enter__(self): ... def __del__(self) -> None: ... def close(self) -> None: ... def __exit__(self, type_, value, trace): ... Erotemic-xdoctest-fac8308/tests/000077500000000000000000000000001505122333300166715ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/tests/notebook_with_doctests.ipynb000066400000000000000000000035551505122333300245270ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def inception(text):\n", " '''\n", " Example:\n", " >>> inception(\"I heard you liked doctests\")\n", " '''\n", " print(text + str(x)) \n", " \n", "x = 10\n", "\n", "def foo():\n", " return \"bar\"\n", "\n", "def inception2(text):\n", " '''\n", " Example:\n", " >>> inception2(\"I heard you liked doctests\" + foo())\n", " '''\n", " print(text + str(x)) \n", " \n", "def random_number():\n", " \"\"\"Returns a random integer from 1 to 6.\n", " \n", " >>> type(random_number())\n", " \n", " >>> random_number() in range(1,7)\n", " True\n", " \"\"\"\n", " return 5 # Chosen by a fair dice roll" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if __name__ == '__main__':\n", " import xdoctest\n", " xdoctest.doctest_callable(inception)\n", " xdoctest.doctest_callable(inception2)\n", " xdoctest.doctest_callable(random_number)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if __name__ == '__main__':\n", " import xdoctest\n", " xdoctest.doctest_module(command='all')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" } }, "nbformat": 4, "nbformat_minor": 4 } Erotemic-xdoctest-fac8308/tests/pybind11_test/000077500000000000000000000000001505122333300213575ustar00rootroot00000000000000Erotemic-xdoctest-fac8308/tests/pybind11_test/CMakeLists.txt000066400000000000000000000044751505122333300241310ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.5.0) project(my_ext) function(pycmd outvar cmd) execute_process( COMMAND "${PYTHON_EXECUTABLE}" -c "${cmd}" RESULT_VARIABLE _exitcode OUTPUT_VARIABLE _output) if(NOT ${_exitcode} EQUAL 0) message(ERROR "Failed when running python code: \"\"\" ${cmd}\"\"\"") message(FATAL_ERROR "Python command failed with error code: ${_exitcode}") endif() # Remove supurflous newlines (artifacts of print) string(STRIP "${_output}" _output) set(${outvar} "${_output}" PARENT_SCOPE) endfunction() ### # Find scikit-build and include its cmake resource scripts # if (NOT SKBUILD) # Find current python major version user option find_package(PythonInterp REQUIRED) find_package(PythonLibs REQUIRED) include_directories(SYSTEM ${PYTHON_INCLUDE_DIR}) pycmd(skbuild_location "import os, skbuild; print(os.path.dirname(skbuild.__file__))") set(skbuild_cmake_dir "${skbuild_location}/resources/cmake") # If skbuild is not the driver, then we need to include its utilities in our CMAKE_MODULE_PATH list(APPEND CMAKE_MODULE_PATH ${skbuild_cmake_dir}) endif() message(STATUS "!!! ATTEMPTING FIND PYTHON EXTENSIONS !!!") find_package(PythonExtensions REQUIRED) # Fetch pybind11 message(STATUS "!!! ATTEMPTING TO FETCH PYBIND11 CONTENT !!!") include(FetchContent) message(STATUS " (1) FetchContent_Declare") FetchContent_Declare( pybind11 GIT_REPOSITORY https://github.com/pybind/pybind11 GIT_TAG v2.4.3 ) # new method for cmake 3.16 #message(STATUS " (2) FetchContent_MakeAvailable") #FetchContent_MakeAvailable(pybind11) # old method for cmake < 3.16 message(STATUS " (2) FetchContent_GetProperties") FetchContent_GetProperties(pybind11) if(NOT pybind11_POPULATED) message(STATUS " (3) FetchContent_Populate") FetchContent_Populate(pybind11) add_subdirectory(${pybind11_SOURCE_DIR} ${pybind11_BINARY_DIR}) endif() message(STATUS "pybind11_POPULATED = ${pybind11_POPULATED}") message(STATUS "pybind11_BINARY_DIR = ${pybind11_BINARY_DIR}") message(STATUS "pybind11_SOURCE_DIR = ${pybind11_SOURCE_DIR}") add_library(my_ext MODULE my_ext.cxx) python_extension_module(my_ext) # Not sure why I need this on the CI target_include_directories(my_ext PUBLIC "${pybind11_SOURCE_DIR}/include") set_property(TARGET my_ext PROPERTY CXX_STANDARD 11) install(TARGETS my_ext LIBRARY DESTINATION .) Erotemic-xdoctest-fac8308/tests/pybind11_test/clean.sh000077500000000000000000000001341505122333300227760ustar00rootroot00000000000000#!/bin/bash rm -rf _skbuild rm -rf *.egg-info rm -rf *.dist-info rm -rf install rm -rf *.so Erotemic-xdoctest-fac8308/tests/pybind11_test/my_ext.cxx000066400000000000000000000015761505122333300234210ustar00rootroot00000000000000#include #include #include #include namespace py = pybind11; namespace my_ext { class MyClass { public: std::string hello_from_cpp() const { return "Hello CPP"; }; }; PYBIND11_MODULE(my_ext, m) { m.attr("my_global_var") = 42; py::class_ (m, "MyClass", R"( A simple pybind11 class with a doctest Example: >>> self = MyClass() >>> print(self.hello_from_cpp()) >>> print(self.hello_from_python()) )") .def(py::init<>()) .def("hello_from_cpp", &MyClass::hello_from_cpp) .def("hello_from_python", [](MyClass& self) -> std::string { auto locals = py::dict(py::arg("self")=self); py::exec(R"( retval = 'Hello World' )", py::globals(), locals); return locals["retval"].cast(); }) ; } } Erotemic-xdoctest-fac8308/tests/pybind11_test/run_dynamic_test.sh000077500000000000000000000003221505122333300252620ustar00rootroot00000000000000#./setup.py build #(cd _skbuild/*/cmake-install/ && python -c "import my_ext") #(cd _skbuild/*/cmake-install/ && xdoctest -m my_ext) pip install --target="$(pwd)" . python -c "import my_ext" xdoctest -m my_ext Erotemic-xdoctest-fac8308/tests/pybind11_test/setup.py000077500000000000000000000004471505122333300231010ustar00rootroot00000000000000#!/usr/bin/env python """ pip install --target="$(pwd)" . """ from setuptools import find_packages from skbuild import setup if __name__ == '__main__': setup( name="my_ext", install_requires=['scikit-build', 'cmake', 'pybind11'], packages=find_packages('.'), ) Erotemic-xdoctest-fac8308/tests/test_binary_ext.py000066400000000000000000000161021505122333300224460ustar00rootroot00000000000000from os.path import dirname from os.path import join from xdoctest import utils __NOTES__ = """ Test this in docker # cd ~/code/xdoctest DOCKER_IMAGE=circleci/python:3.9-rc docker pull $DOCKER_IMAGE docker run -v $PWD:/io \ --rm -it $DOCKER_IMAGE bash docker pull python:3.9.0a5-buster docker run -v $PWD:/io \ --rm -it python bash print('feature.compiler_flag = {!r:>60}'.format(bin(221184))) for key in __future__.all_feature_names: feature = getattr(__future__, key, None) print('feature.compiler_flag = {!r:>60}'.format(key + ' ' +bin(feature.compiler_flag))) feature.compiler_flag = 'nested_scopes 0b10000' feature.compiler_flag = 'generators 0b0' feature.compiler_flag = 'with_statement 0b1000000000000000' feature.compiler_flag = 'barry_as_FLUFL 0b1000000000000000000' feature.compiler_flag = 'generator_stop 0b10000000000000000000' feature.compiler_flag = 'annotations 0b100000000000000000000' feature.compiler_flag = '0b110110000000000000 feature.compiler_flag = 'print_function 0b10000000000000000' feature.compiler_flag = 'unicode_literals 0b100000000000000000' feature.compiler_flag = 'division 0b10000000000000' feature.compiler_flag = 'absolute_import 0b100000000000000' feature.compiler_flag = 'print_function 0b100000000000000000000' feature.compiler_flag = 'nested_scopes 0b10000' feature.compiler_flag = 'generators 0b0' feature.compiler_flag = 'division 0b100000000000000000' feature.compiler_flag = 'absolute_import 0b1000000000000000000' feature.compiler_flag = 'with_statement 0b10000000000000000000' feature.compiler_flag = 'unicode_literals 0b1000000000000000000000' feature.compiler_flag = 'barry_as_FLUFL 0b10000000000000000000000' feature.compiler_flag = 'generator_stop 0b100000000000000000000000' feature.compiler_flag = 'annotations 0b1000000000000000000000000' ./python -c "import __future__; print(__future__.print_function.compiler_flag | __future__.division.compiler_flag | __future__.unicode_literals.compiler_flag | __future__.absolute_import.compiler_flag)" __future__. print(eval(compile('[i for i in range(3)]', mode='eval', filename='foo', flags=221184))) print(eval(compile('[i for i in range(3)]', mode='eval', filename='foo', flags=221184))) ./python -c "eval(compile('[i for i in range(3)]', mode='single', filename='fo', flags=0, dont_inherit=True), {})" ./python -c "import __future__; print(eval(compile('[i for i in range(3)]', mode='eval', filename='fo', flags=__future__.print_function.compiler_flag | __future__.division.compiler_flag | __future__.unicode_literals.compiler_flag | __future__.absolute_import.compiler_flag)))" ./python -c "print(eval(compile('[i for i in range(3)]', mode='eval', filename='foo', flags=221184, dont_inherit=True), {}))" python -c "print(eval('[i for i in range(3)]', {}))" ipython -c "print(eval('[i for i in range(3)]', {}))" ipython -c "print(eval('[i for i in range(3)]'))" ipython -c "print(eval('[i for i in range(3)]'))" python -c "eval('print([i for i in range(3)])', {})" # cd /io mkdir -p $HOME/code cd $HOME/code git clone -b dev/hotfix https://github.com/Erotemic/xdoctest.git cd $HOME/code/xdoctest pip install -e .[all] python tests/test_binary_ext.py build_demo_extmod cd /io/tests/pybind11_test mkdir -p /io/tests/pybind11_test/build cd /io/tests/pybind11_test/build cmake .. """ def build_demo_extmod(): """ CommandLine: python tests/test_binary_ext.py build_demo_extmod """ import os import glob import sys import platform plat_impl = platform.python_implementation() if plat_impl == 'PyPy': import pytest pytest.skip('pypy not supported') if sys.platform.startswith('win32'): import pytest pytest.skip('win32 not supported YET') try: import skbuild # NOQA import pybind11 # NOQA import cmake # NOQA import ninja # NOQA except Exception: import pytest pytest.skip('skbuild, ninja, cmake, or pybind11 not available') testing_dpath = dirname(__file__) verstr, details = sys.version.split(' ', 1) try: # poor man's hash (in case python wasnt built with hashlib) coded = (int(details.encode('utf8').hex(), 16) % (2 ** 32)) hashid = coded.to_bytes(4, 'big').hex() except Exception: hashid = 'python2isdead' src_dpath = join(testing_dpath, 'pybind11_test') bin_dpath = join(src_dpath, 'tmp', 'install_{}.{}'.format(verstr, hashid)) print('src_dpath = {!r}'.format(src_dpath)) print('bin_dpath = {!r}'.format(bin_dpath)) utils.ensuredir(bin_dpath) candidates = list(glob.glob(join(bin_dpath, 'my_ext.*'))) if len(candidates) == 0: pip_args = ['install', '--target={}'.format(bin_dpath), src_dpath] print('pip_args = {!r}'.format(pip_args)) if 0: pyexe = sys.executable ret = os.system(pyexe + ' -m pip ' + ' '.join(pip_args)) else: try: from pip.__main__ import _main as pip_main except (AttributeError, ImportError): from pip._internal import main as pip_main if callable(pip_main): pip_main_func = pip_main else: pip_main_func = pip_main.main ret = pip_main_func(pip_args) assert ret == 0, 'unable to build our pybind11 example' candidates = list(glob.glob(join(bin_dpath, 'my_ext.*'))) assert len(candidates) == 1 extmod_fpath = candidates[0] return extmod_fpath def test_run_binary_doctests(): """ Tests that we can run doctests in a compiled pybind11 module CommandLine: python ~/code/xdoctest/tests/test_binary_ext.py test_run_binary_doctests Notes: xdoctest -m $HOME/code/xdoctest/tests/pybind11_test/install/my_ext.cpython-38-x86_64-linux-gnu.so list --analysis=dynamic """ extmod_fpath = build_demo_extmod() print('extmod_fpath = {!r}'.format(extmod_fpath)) from xdoctest import runner # results = runner.doctest_module(extmod_fpath, analysis='auto') results = runner.doctest_module(extmod_fpath, analysis='dynamic', command='list', argv=[], verbose=3) print('results = {!r}'.format(results)) results = runner.doctest_module(extmod_fpath, analysis='dynamic', command='all', argv=[], verbose=3) print('results = {!r}'.format(results)) assert results['n_passed'] == 1 if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/tests/test_binary_ext.py """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_cases.py000066400000000000000000000135211505122333300214020ustar00rootroot00000000000000from xdoctest.utils.util_misc import _run_case from xdoctest import utils def test_properties(): """ Test that all doctests are extracted from properties correctly. https://github.com/Erotemic/xdoctest/issues/73 Credit: @trappitsch """ text = _run_case(utils.codeblock( ''' class Test: @property def test(self): """ Example: >>> ini = Test() >>> ini.test 3.14 """ return 3.14 @test.setter def test(self, s): pass ''')) assert 'running 1 test' in text text = _run_case(utils.codeblock( ''' class Test: @property def test(self): """ Example: >>> ini = Test() >>> ini.test 3.14 """ return 3.14 ''')) assert 'running 1 test' in text text = _run_case(utils.codeblock( ''' class Test: @property def test(self): """ Example: >>> ini = Test() >>> ini.test 3.14 """ return 3.14 @test.setter def test(self, s): """ Example: >>> ini = Test() >>> ini.test = 3 """ pass ''')) assert 'running 1 test' in text text = _run_case(utils.codeblock( ''' class Test: @property def test(self): return 3.14 @test.setter def test(self, s): """ Example: >>> ini = Test() >>> ini.test = 3 """ pass ''')) assert 'running 0 test' in text text = _run_case(utils.codeblock( ''' class Test: @property def test(self): return 3.14 @test.setter def test(self, s): """ Example: >>> ini = Test() >>> ini.test = 3 """ pass @test.deleter def test(self, s): """ Example: >>> ini = Test() >>> ini.test = 3 """ pass ''')) assert 'running 0 test' in text def test_correct_skipping_on_decorators1(): """ This is a weird case similar to the torch dispatch doctest ~/code/pytorch/torch/fx/experimental/unification/multipledispatch/core.py Something about it causes the skip directive not to be applied to the entire thing. Not quite sure what's going on yet. The issue was that decorator line numbers were returning as the line of the function itself. This mean that the PS1 grouping put the directive in a group with logic, which made the parser think it was inline, which meant the skip state was cleared after it was executed, so it executed the bad code. This fixes that. """ import xdoctest from xdoctest import runner from os.path import join source = utils.codeblock( ''' def dispatch(*types, **kwargs): """ blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah Example: >>> # xdoctest: +SKIP >>> @dispatch(int) ... def f(x): ... return x + 1 >>> @dispatch(float) ... def f(x): ... return x + 1 >>> f(3) 4 >>> f(3.0) 4.0 """ return lambda x: x ''') config = { # 'global_exec': 'a=1', 'style': 'google', } # xdoctest.global_state.DEBUG = 1 # xdoctest.global_state.DEBUG_PARSER = 10 # xdoctest.global_state.DEBUG_CORE = 1 # xdoctest.global_state.DEBUG_RUNNER = 1 # xdoctest.global_state.DEBUG_DOCTEST = 1 temp = utils.TempDir() dpath = temp.ensure() with temp as temp: modpath = join(dpath, 'test_example_run.py') with open(modpath, 'w') as file: file.write(source) examples = list(xdoctest.core.parse_doctestables(modpath, style='google', analysis='static')) print(f'examples={examples}') with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'all', argv=[''], config=config) print(cap.text) assert '1 skipped' in cap.text def test_correct_skipping_on_decorators_simple(): """ minimal test for decorator skips """ import xdoctest from xdoctest import runner from os.path import join source = utils.codeblock( ''' def _my_decorator(): """ Example: >>> # xdoctest: +SKIP >>> @_my_decorator() ... def my_func(x): ... ... >>> f(3) """ return ''') config = { 'style': 'google', } temp = utils.TempDir() dpath = temp.ensure() with temp as temp: modpath = join(dpath, 'test_example_run.py') with open(modpath, 'w') as file: file.write(source) examples = list(xdoctest.core.parse_doctestables(modpath, style='google', analysis='static')) print(f'examples={examples}') with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'all', argv=[''], config=config) print(cap.text) assert '1 skipped' in cap.text Erotemic-xdoctest-fac8308/tests/test_checker.py000066400000000000000000000027401505122333300217110ustar00rootroot00000000000000from xdoctest import checker from xdoctest import directive # from xdoctest import utils def test_visible_lines(): """ pytest tests/test_checker.py """ got = 'this is invisible\ronly this is visible' print(got) want = 'only this is visible' assert checker.check_output(got, want) def test_visible_lines_explicit(): """ pytest tests/test_checker.py """ got = 'invisible\rIS-visible' want = 'invisible\rIS-visible' # The got-want checker is quite permissive. # Use asserts for non-permissive tests. assert checker.check_output(got, want) def test_blankline_accept(): """ pytest tests/test_checker.py """ # Check that blankline is normalized away runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False}) got = 'foo\n\nbar' want = 'foo\n\nbar' assert checker.check_output(got, want, runstate) def test_blankline_failcase(): # Check that blankline is not normalizd in a "got" statement runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False}) got = 'foo\n\nbar' want = 'foo\n\nbar' assert not checker.check_output(got, want, runstate) def test_blankline_not_accept(): # Check that blankline is not normalized away when # DONT_ACCEPT_BLANKLINE is on runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': True}) got = 'foo\n\nbar' want = 'foo\n\nbar' assert not checker.check_output(got, want, runstate) Erotemic-xdoctest-fac8308/tests/test_core.py000066400000000000000000000423761505122333300212460ustar00rootroot00000000000000from os.path import join import xdoctest from xdoctest import core from xdoctest import utils def _test_status(docstr): docstr = utils.codeblock(docstr) try: temp = utils.util_misc.TempDoctest(docstr=docstr) except Exception: raise # pytest seems to load an older version of xdoctest for some reason import xdoctest import inspect print('xdoctest.__version__ = {!r}'.format(xdoctest.__version__)) print('utils = {!r}'.format(utils)) print('utils.util_misc = {!r}'.format(utils.util_misc)) print('utils.TempDoctest = {!r}'.format(utils.TempDoctest)) print(inspect.getargspec(utils.TempDoctest)) raise doctests = list(core.parse_doctestables(temp.modpath)) status = doctests[0].run(verbose=0, on_error='return') return status def test_mod_lineno(): with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_mod_lineno.py') source = utils.codeblock( ''' class Fun: #1 @property def test(self): """ # 4 >>> a = 1 >>> 1 / 0 """ ''') with open(modpath, 'w') as file: file.write(source) doctests = list(core.parse_doctestables(modpath, style='freeform')) assert len(doctests) == 1 self = doctests[0] # print(self._parts[0]) assert self.lineno == 5 # print(self.format_src()) self.config['colored'] = False assert self.format_src(offset_linenos=False).strip().startswith('1') assert self.format_src(offset_linenos=True).strip().startswith('5') with utils.PythonPathContext(dpath): status = self.run(verbose=10, on_error='return') assert not status['passed'] def test_mod_globals(): with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_mod_globals.py') source = utils.codeblock( ''' X = 10 def test(self): """ >>> X 10 """ ''') with open(modpath, 'w') as file: file.write(source) from xdoctest import core doctests = list(core.parse_doctestables(modpath, style='freeform')) assert len(doctests) == 1 self = doctests[0] with utils.PythonPathContext(dpath): status = self.run(verbose=0, on_error='return') assert status['passed'] assert self.logged_evals[0] == 10 def test_show_entire(): """ pytest tests/test_core.py::test_show_entire """ temp = utils.TempDir() dpath = temp.ensure() modpath = join(dpath, 'test_show_entire.py') source = utils.codeblock( ''' def foo(): """ Prefix Example: >>> x = 4 >>> x = 5 + x >>> x = 6 + x >>> x = 7 + x >>> x 22 >>> x = 8 + x >>> x = 9 + x >>> x = 10 + x >>> x = 11 + x >>> x = 12 + x >>> x 42 text-line-after """ ''') with open(modpath, 'w') as file: file.write(source) from xdoctest import core # calldefs = core.module_calldefs(modpath) # docline = calldefs['foo'].doclineno # docstr = calldefs['foo'].docstr # all_parts = parser.DoctestParser().parse(docstr) # assert docline == 2 doctests = list(core.parse_doctestables(modpath, style='freeform')) assert len(doctests) == 1 self = doctests[0] self.config['colored'] = False print(self.lineno) print(self._parts[0].line_offset) print(self.format_src()) src_offset = self.format_src(offset_linenos=True).strip() src_nooffset = self.format_src(offset_linenos=False).strip() assert src_offset[:4].startswith('6') assert src_nooffset[:4].startswith('1') with utils.PythonPathContext(dpath): status = self.run(verbose=0, on_error='return') assert not status['passed'] temp.cleanup() def test_freeform_parse_lineno(): """ python ~/code/xdoctest/tests/test_core.py test_freeform_parse_lineno """ docstr = utils.codeblock( ''' >>> print('line1') # test.line=1, offset=0 Example: >>> x = 0 # test.line=4, offset=0 DisableExample: >>> x = 0 # test.line=7, offset=0 Example: >>> True # test.line=10, offset=0 True Example: >>> False # test.line=14, offset=0 >>> False # test.line=15, offset=1 False >>> True # test.line=17, offset=3 junk text >>> x = 4 # line 20, offset 0 >>> x = 5 + x # line 21, offset 1 33 >>> x = 6 + x # line 23, offset 3 text-line-after ''') from xdoctest import core doctests = list(core.parse_freeform_docstr_examples(docstr, lineno=1, asone=False)) assert [test.lineno for test in doctests] == [1, 4, 10, 14, 20] # This asserts if the lines are consecutive. Should we enforce this? # Perhaps its ok if they are not. for test in doctests: assert test._parts[0].line_offset == 0 offset = 0 for p in test._parts: assert p.line_offset == offset offset += p.n_lines doctests = list(core.parse_freeform_docstr_examples(docstr, lineno=1, asone=True)) assert [test.lineno for test in doctests] == [1] doctests = list(core.parse_google_docstr_examples(docstr, lineno=1)) assert [test.lineno for test in doctests] == [4, 10, 14] for test in doctests: test._parse() assert test._parts[0].line_offset == 0 offset = 0 for p in test._parts: assert p.line_offset == offset offset += p.n_lines def test_collect_module_level(): """ pytest tests/test_core.py::test_collect_module_level -s -vv Ignore: temp = utils.TempDir() """ temp = utils.TempDir() dpath = temp.ensure() modpath = join(dpath, 'test_collect_module_level.py') source = utils.codeblock( ''' """ >>> pass """ ''') with open(modpath, 'w') as file: file.write(source) from xdoctest import core doctests = list(core.parse_doctestables(modpath, style='freeform')) assert len(doctests) == 1 self = doctests[0] assert self.callname == '__doc__' self.config['colored'] = False src_offset = self.format_src(offset_linenos=True).strip() src_nooffset = self.format_src(offset_linenos=False).strip() assert src_offset[:4].startswith('2') assert src_nooffset[:4].startswith('1') with utils.PythonPathContext(dpath): status = self.run(verbose=0, on_error='return') assert status['passed'] temp.cleanup() def test_collect_module_level_singleline(): """ pytest tests/test_core.py::test_collect_module_level Ignore: temp = utils.TempDir() """ temp = utils.TempDir() dpath = temp.ensure() modpath = join(dpath, 'test_collect_module_level_singleline.py') source = utils.codeblock( '''">>> pass"''') with open(modpath, 'w') as file: file.write(source) from xdoctest import core doctests = list(core.parse_doctestables(modpath, style='freeform')) assert len(doctests) == 1 self = doctests[0] assert self.callname == '__doc__' self.config['colored'] = False assert self.format_src(offset_linenos=True).strip().startswith('1') assert self.format_src(offset_linenos=False).strip().startswith('1') with utils.PythonPathContext(dpath): status = self.run(verbose=0, on_error='return') assert status['passed'] temp.cleanup() def test_no_docstr(): """ CommandLine: python -m test_core test_no_docstr """ with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_no_docstr.py') source = utils.codeblock( ''' def get_scales(kpts): """ Gets average scale (does not take into account elliptical shape """ _scales = np.sqrt(get_sqrd_scales(kpts)) return _scales ''') with open(modpath, 'w') as file: file.write(source) from xdoctest import core doctests = list(core.parse_doctestables(modpath, style='freeform')) assert len(doctests) == 0 def test_oneliner(): """ python ~/code/xdoctest/tests/test_core.py test_oneliner """ with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_oneliner.py') source = utils.codeblock( ''' def foo(): """ >>> assert False, 'should fail' """ ''') with open(modpath, 'w') as file: file.write(source) doctests = list(core.parse_doctestables(modpath)) assert len(doctests) == 1 print('doctests = {!r}'.format(doctests)) import pytest with pytest.raises(AssertionError, match='should fail'): doctests[0].run() def test_delayed_want_pass_cases(): """ The delayed want algorithm allows a want statement to match trailing unmatched stdout if it fails to directly match the most recent stdout. In more mathy terms let $w$ be the current "want", and let $g[-t:]$ be the trailing $t$ most recent "gots" captured from stdout. We say the "want" matches "got" if $w matches g[-t:] for t in range(1, n)$, where $n$ is the index of the last part with a success match. CommandLine: python ~/code/xdoctest/tests/test_core.py test_delayed_want_pass_cases """ # Pass Case1: status = _test_status( """ >>> print('some text') >>> print('more text') some text more text """) assert status['passed'] # Pass Case2: status = _test_status( """ >>> print('some text') some text >>> print('more text') more text """) assert status['passed'] # Pass Case3: "its ok to only match more text and ignore some text" status = _test_status( """ >>> print('some text') >>> print('more text') more text """) assert status['passed'] def test_delayed_want_fail_cases(): """ CommandLine: xdoctest -m ~/code/xdoctest/tests/test_core.py test_delayed_want_fail_cases """ # Fail Case4: "more text has not been printed yet" status = _test_status( """ >>> print('some text') some text more text >>> print('more text') """) assert not status['passed'] # Fail Case5: cannot match "some text" more than once status = _test_status( """ >>> print('some text') some text >>> print('more text') some text more text """) assert not status['passed'] # Fail Case6: Because "more text" was matched, "some text" is forever # ignored status = _test_status( """ >>> print('some text') >>> print('more text') more text >>> print('even more text') some text even more text """) assert not status['passed'] # alternate case 6 status = _test_status( """ >>> print('some text') >>> print('more text') more text >>> print('even more text') some text more text even more text """) assert not status['passed'] def test_indented_grouping(): """ Initial changes in 0.10.0 broke parsing of some ubelt tests, check to ensure using `...` in indented blocks is ok (as long as there is no want string in the indented block). CommandLine: xdoctest -m ~/code/xdoctest/tests/test_core.py test_indented_grouping """ from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> from xdoctest.utils import codeblock >>> # Simulate an indented part of code >>> if True: >>> # notice the indentation on this will be normal >>> codeblock_version = codeblock( ... ''' ... def foo(): ... return 'bar' ... ''' ... ) >>> # notice the indentation and newlines on this will be odd >>> normal_version = (''' ... def foo(): ... return 'bar' ... ''') >>> assert normal_version != codeblock_version """)) # print(example.format_src()) status = example.run(verbose=0) assert status['passed'] def test_backwards_compat_eval_in_loop(): """ Test that changes in 0.10.0 fix backwards compatibility issue. CommandLine: xdoctest -m ~/code/xdoctest/tests/test_core.py test_backwards_compat_eval_in_loop """ from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> for i in range(2): ... '%s' % i ... '0' '1' """)) # print(example.format_src()) status = example.run(verbose=0) assert status['passed'] example = DocTest( utils.codeblock(r""" >>> for i in range(2): ... '%s' % i '0' '1' """)) status = example.run(verbose=0) assert status['passed'] def test_backwards_compat_indent_value(): """ CommandLine: xdoctest -m ~/code/xdoctest/tests/test_core.py test_backwards_compat_indent_value """ from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> b = 3 >>> if True: ... a = 1 ... isinstance(1, int) True """)) status = example.run(verbose=0) assert status['passed'] def test_concise_try_except(): """ CommandLine: xdoctest -m ~/code/xdoctest/tests/test_core.py test_concise_try_except """ from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> # xdoctest: +IGNORE_WANT >>> try: raise Exception ... except Exception: print(lambda *a, **b: sys.stdout.write(str(a) + "\n" + str(b))) a bad want string ... """)) status = example.run(verbose=0) assert status['passed'] from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> # xdoctest: +IGNORE_WANT >>> try: raise Exception >>> except Exception: print(lambda *a, **b: sys.stdout.write(str(a) + "\n" + str(b))) a bad want string ... """)) status = example.run(verbose=0) assert status['passed'] def test_semicolon_line(): r""" Test for https://github.com/Erotemic/xdoctest/issues/108 Note: Notes on the issue: .. code:: python # This works compile("import os; print(os)", filename="", mode='exec') compile("import os; print(os)", filename="", mode='single') compile("1; 2", filename="", mode='exec') compile("1; 2", filename="", mode='single') compile("print();print()", filename="", mode='single') compile("print();print()", filename="", mode='exec') compile("print()", filename="", mode='eval') compile("print()", filename="", mode='exec') compile("print()", filename="", mode='single') # This breaks: compile("import os; print(os)", filename="", mode='eval') # I suppose we can't have imports in an eval? compile("import os\n", filename="", mode='eval') # Or multiple lines? compile("print();print()", filename="", mode='eval') # No imports, no assignments, no semicolons compile("1; 2", filename="", mode='eval') CommandLine: xdoctest -m ~/code/xdoctest/tests/test_core.py test_concise_exceptions """ from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> import os; print(os.path.abspath('.')) """)) status = example.run(verbose=0) assert status['passed'] # The problem case was when it was compiled with a "want" statement # from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> import os; print(os.path.abspath('.')) ... """)) status = example.run(verbose=0) assert status['passed'] # Test single import # import xdoctest # xdoctest.parser.DEBUG = 100 from xdoctest.doctest_example import DocTest example = DocTest( utils.codeblock(r""" >>> import os ... """)) status = example.run(verbose=0) assert status['passed'] if __name__ == '__main__': """ CommandLine: export PYTHONPATH=$PYTHONPATH:/home/joncrall/code/xdoctest/tests python ~/code/xdoctest/tests/test_core.py zero pytest tests/test_core.py -vv """ import xdoctest # NOQA xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_directive.py000066400000000000000000000061111505122333300222570ustar00rootroot00000000000000from xdoctest import doctest_example from xdoctest import utils def test_inline_skip_directive(): """ pytest tests/test_directive.py::test_inline_skip_directive """ string = utils.codeblock( ''' >>> x = 0 >>> assert False, 'should be skipped' # doctest: +SKIP >>> y = 0 ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') # TODO: ensure that lines after the inline are run assert result['passed'] def test_block_skip_directive(): """ pytest tests/test_directive.py::test_block_skip_directive """ string = utils.codeblock( ''' >>> x = 0 >>> # doctest: +SKIP >>> assert False, 'should be skipped' ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_multi_requires_directive(): """ Test semi-complex case with multiple requirements in a single line xdoctest ~/code/xdoctest/tests/test_directive.py test_multi_requires_directive """ string = utils.codeblock( ''' >>> x = 0 >>> print('not-skipped') >>> # doctest: +REQUIRES(env:NOT_EXIST, --show, module:xdoctest) >>> print('is-skipped') >>> assert False, 'should be skipped' >>> # doctest: -REQUIRES(env:NOT_EXIST, module:xdoctest) >>> print('is-skipped') >>> assert False, 'should be skipped' >>> # doctest: +REQUIRES(env:NOT_EXIST, --show, module:xdoctest) >>> print('is-skipped') >>> assert False, 'should be skipped' >>> # doctest: -REQUIRES(env:NOT_EXIST) >>> print('is-skipped') >>> assert False, 'should be skipped' >>> # doctest: -REQUIRES(--show) >>> print('not-skipped') >>> x = 'this will not be skipped' >>> # doctest: -REQUIRES(env:NOT_EXIST, --show, module:xdoctest) >>> print('not-skipped') >>> assert x == 'this will not be skipped' ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') stdout = ''.join(list(self.logged_stdout.values())) assert result['passed'] assert stdout.count('not-skipped') == 3 assert stdout.count('is-skipped') == 0 def test_directive_syntax_error(): string = utils.codeblock( ''' >>> x = 0 >>> # doctest: +REQUIRES(module:xdoctest) >>> print('not-skipped') >>> # doctest: +REQUIRES(badsyntax) >>> print('is-skipped') ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='return') assert not result['passed'] assert 'Failed to parse' in result['exc_info'][1].args[0] assert 'line 4' in result['exc_info'][1].args[0] stdout = ''.join(list(self.logged_stdout.values())) assert stdout.count('not-skipped') == 1 if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/tests/test_directive.py pytest ~/code/xdoctest/tests/test_directive.py """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_doctest_example.py000066400000000000000000000271261505122333300234720ustar00rootroot00000000000000from xdoctest import doctest_example from xdoctest import exceptions from xdoctest import utils from xdoctest import constants from xdoctest import checker def test_exit_test_exception(): """ pytest tests/test_doctest_example.py::test_exit_test_exception """ string = utils.codeblock( ''' >>> from xdoctest import ExitTestException >>> raise ExitTestException() >>> 0 / 0 # should never reach this 2 ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_failed_assign_want(): """ pytest tests/test_doctest_example.py::test_exit_test_exception """ string = utils.codeblock( ''' >>> name = 'foo' 'anything' ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='return', verbose=0) assert result['failed'] fail_text = '\n'.join(self.repr_failure()) assert 'Got nothing' in fail_text def test_continue_ambiguity(): """ pytest tests/test_doctest_example.py::test_exit_test_exception """ string = utils.codeblock( ''' >>> class Lowerer: ... def __init__(self): ... self.cache = LRI() ... ... def lower(self, text): ... return text.lower() ... ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='return', verbose=3) assert result['passed'] def test_contination_want_ambiguity(): """ xdoctest ~/code/xdoctest/tests/test_doctest_example.py test_contination_want_ambiguity """ string = utils.codeblock( ''' >>> class Lowerer: ... def __init__(self): ... self.cache = LRI() ... ... def lower(self, text): ... return text.lower() ... ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='return', verbose=3) assert result['passed'] def test_multiline_list(): """ pytest tests/test_doctest_example.py::test_multiline_list """ string = utils.codeblock( ''' >>> x = [1, 2, 3, >>> 4, 5, 6] >>> print(len(x)) 6 ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_failure(): string = utils.codeblock( ''' >>> i = 0 >>> 0 / i 2 ''') self = doctest_example.DocTest(docsrc=string, lineno=1000) self._parse() try: self.run(on_error='raise') except ZeroDivisionError: pass else: raise AssertionError('should have gotten zero division') result = self.run(on_error='return') assert not result['passed'] def test_format_src(): """ python tests/test_doctest_example.py test_format_src pytest tests/test_doctest_example.py::test_format_src -s -v """ string = utils.codeblock( ''' >>> i = 0 >>> 0 / i 2 ''') string_with_lineno = utils.codeblock( ''' 1 >>> i = 0 2 >>> 0 / i 2 ''').replace('!', ' ') self = doctest_example.DocTest(docsrc=string) self._parse() assert self.format_src(colored=0, linenos=1) == string_with_lineno assert self.format_src(colored=0, linenos=0) == string assert utils.strip_ansi(self.format_src(colored=1, linenos=1)) == string_with_lineno assert utils.strip_ansi(self.format_src(colored=1, linenos=0)) == string def test_eval_expr_capture(): """ pytest tests/test_doctest_example.py::test_eval_expr_capture -s """ docsrc = utils.codeblock( ''' >>> x = 3 >>> y = x + 2 >>> y + 2 2 ''') self = doctest_example.DocTest(docsrc=docsrc) self._parse() p1, p2 = self._parts # test_globals = {} # code1 = compile(p1.source, '', 'exec') # exec(code1, test_globals) # code2 = compile(p2.source, '', 'eval') # result = eval(code2, test_globals) try: self.run() except Exception as ex: assert hasattr(ex, 'output_difference') msg = ex.output_difference(colored=False) assert msg == utils.codeblock( ''' Expected: 2 Got: 7 ''') def test_run_multi_want(): docsrc = utils.codeblock( ''' >>> x = 2 >>> x 2 >>> 'string' 'string' >>> print('string') string ''') self = doctest_example.DocTest(docsrc=docsrc) self.run() result = self.run() assert result['passed'] assert list(self.logged_stdout.values()) == ['', '', '', 'string\n'] assert list(self.logged_evals.values()) == [constants.NOT_EVALED, 2, 'string', None] def test_comment(): docsrc = utils.codeblock( ''' >>> # foobar ''') self = doctest_example.DocTest(docsrc=docsrc) self._parse() assert len(self._parts) == 1 self.run(verbose=0) docsrc = utils.codeblock( ''' >>> # foobar >>> # bazbiz ''') self = doctest_example.DocTest(docsrc=docsrc) self._parse() assert len(self._parts) == 1 self.run(verbose=0) docsrc = utils.codeblock( ''' >>> # foobar >>> x = 0 >>> x / 0 >>> # bazbiz ''') self = doctest_example.DocTest(docsrc=docsrc, lineno=1) self._parse() assert len(self._parts) == 1 result = self.run(on_error='return', verbose=0) assert not result['passed'] assert self.failed_lineno() == 3 def test_want_error_msg(): """ python tests/test_doctest_example.py test_want_error_msg pytest tests/test_doctest_example.py::test_want_error_msg """ string = utils.codeblock( ''' >>> raise Exception('everything is fine') Traceback (most recent call last): Exception: everything is fine ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_want_error_msg_failure(): """ python tests/test_doctest_example.py test_want_error_msg_failure pytest tests/test_doctest_example.py::test_want_error_msg_failure """ string = utils.codeblock( ''' >>> raise Exception('everything is NOT fine') Traceback (most recent call last): Exception: everything is fine ''') self = doctest_example.DocTest(docsrc=string) import pytest with pytest.raises(checker.GotWantException): self.run(on_error='raise') def test_await(): """ python tests/test_doctest_example.py test_await pytest tests/test_doctest_example.py::test_await """ string = utils.codeblock( ''' >>> import asyncio >>> res = await asyncio.sleep(0, result="slept") >>> print(res) slept ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_async_for(): """ python tests/test_doctest_example.py test_async_for pytest tests/test_doctest_example.py::test_async_for """ string = utils.codeblock( ''' >>> async def test_gen(): >>> yield 5 >>> yield 6 >>> async for i in test_gen(): >>> print(i) 5 6 ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_async_with(): """ python tests/test_doctest_example.py test_async_with pytest tests/test_doctest_example.py::test_async_with """ string = utils.codeblock( ''' >>> from contextlib import asynccontextmanager >>> import asyncio >>> @asynccontextmanager >>> async def gen(): >>> try: >>> yield 1 >>> finally: >>> await asyncio.sleep(0) >>> async with gen() as res: >>> print(res) 1 ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_async_future_without_directive(): """ python tests/test_doctest_example.py test_async_future_without_directive pytest tests/test_doctest_example.py::test_async_future_without_directive """ string = utils.codeblock( ''' >>> import asyncio >>> future = asyncio.get_running_loop().create_future() >>> future.set_result("ok") >>> print(await future) ok ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='return') assert result['failed'] def test_async_future_with_directive(): """ python tests/test_doctest_example.py test_async_future_with_directive pytest tests/test_doctest_example.py::test_async_future_with_directive """ string = utils.codeblock( ''' >>> # xdoctest: +ASYNC >>> import asyncio >>> future = asyncio.get_running_loop().create_future() >>> future.set_result("ok") >>> print(await future) ok ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_await_in_running_loop(): """ python tests/test_doctest_example.py test_await_in_running_loop pytest tests/test_doctest_example.py::test_await_in_running_loop """ string = utils.codeblock( ''' >>> import asyncio >>> res = await asyncio.sleep(0, result="slept") >>> print(res) slept ''') import asyncio import pytest self = doctest_example.DocTest(docsrc=string) async def run_in_loop(doctest, on_error, verbose=None): return doctest.run(on_error=on_error, verbose=verbose) with pytest.raises(exceptions.ExistingEventLoopError): asyncio.run(run_in_loop(self, 'raise')) self = doctest_example.DocTest(docsrc=string) res = asyncio.run(run_in_loop(self, 'return', verbose=3)) assert res['failed'] assert self.repr_failure() def test_async_def(): """ python tests/test_doctest_example.py test_async_def pytest tests/test_doctest_example.py::test_async_def """ string = utils.codeblock( ''' >>> import asyncio >>> async def run_async(): >>> return await asyncio.sleep(0, result="slept") >>> res = asyncio.run(run_async()) >>> print(res) slept ''') self = doctest_example.DocTest(docsrc=string) result = self.run(on_error='raise') assert result['passed'] def test_tabs_in_doctest(): """ pytest tests/test_doctest_example.py::test_tabs_in_doctest """ from xdoctest import utils from xdoctest import doctest_example tab = '\t' assert ord(tab) == 9 string = utils.codeblock( f''' >>> text = "tab{tab}sep{tab}val" >>> print(repr(text)) >>> assert chr(9) in text >>> assert ord("{tab}") == 9 ''') self = doctest_example.DocTest(docsrc=string) # This was ok in version 1.2.0 assert tab in self.docsrc # Failed in 1.2.0 result = self.run(on_error='raise', verbose=3) assert result['passed'] if __name__ == '__main__': """ CommandLine: export PYTHONPATH=$PYTHONPATH:/home/joncrall/code/xdoctest/tests python ~/code/xdoctest/tests/test_doctest_example.py xdoctest ~/code/xdoctest/tests/test_doctest_example.py zero-args """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_doctest_in_notebook.ipynb000066400000000000000000000057501505122333300250350ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def inception(text):\n", " '''\n", " Example:\n", " >>> inception(\"I heard you liked doctests\")\n", " '''\n", " print(text + str(x)) \n", " \n", "x = 10\n", "\n", "def foo():\n", " return \"bar\"\n", "\n", "def inception2(text):\n", " '''\n", " Example:\n", " >>> inception2(\"I heard you liked doctests\" + foo())\n", " '''\n", " print(text + str(x)) \n", " \n", "def random_number():\n", " \"\"\"Returns a random integer from 1 to 6.\n", " \n", " >>> type(random_number())\n", " \n", " >>> random_number() in range(1,7)\n", " True\n", " \"\"\"\n", " return 5 # Chosen by a fair dice roll" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "====== ======\n", "* DOCTEST : ::inception:0, line 3 <- wrt source file\n", "DOCTEST SOURCE\n", "1 >>> inception(\"I heard you liked doctests\")\n", "DOCTEST STDOUT/STDERR\n", "I heard you liked doctests10\n", "DOCTEST RESULT\n", "* SUCCESS: ::inception:0\n", "====== ======\n" ] } ], "source": [ "import xdoctest\n", "xdoctest.doctest_callable(inception)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "====== ======\n", "* DOCTEST : ::inception2:0, line 3 <- wrt source file\n", "DOCTEST SOURCE\n", "1 >>> inception2(\"I heard you liked doctests\" + foo())\n", "DOCTEST STDOUT/STDERR\n", "I heard you liked doctestsbar10\n", "DOCTEST RESULT\n", "* SUCCESS: ::inception2:0\n", "====== ======\n" ] } ], "source": [ "import xdoctest\n", "xdoctest.doctest_callable(inception2)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import xdoctest\n", "xdoctest.doctest_callable(random_number)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" } }, "nbformat": 4, "nbformat_minor": 4 } Erotemic-xdoctest-fac8308/tests/test_dynamic.py000066400000000000000000000142551505122333300217350ustar00rootroot00000000000000""" This mod has a docstring Example: >>> pass """ import sys from xdoctest import dynamic_analysis as dynamic from xdoctest import static_analysis as static # add function from another module is_defined_by_module = dynamic.is_defined_by_module TopLevelVisitor = static.TopLevelVisitor class SimpleDescriptor: def __init__(self): self.value = 0 def __get__(self, instance, owner): return self.value def __set__(self, instance, value): self.value = float(value) class SimpleClass: """ Example: >>> pass """ cls_attr = SimpleDescriptor() # Injected funcs should not be part of the calldefs visit = TopLevelVisitor.visit def __init__(self): self.inst_attr = SimpleDescriptor() def submethod1(): """ Example: >>> pass """ pass @classmethod def method1(cls): """ Example: >>> pass """ pass @staticmethod def method2(): """ Example: >>> pass """ pass @property def method3(self): """ Example: >>> pass """ pass def method4(self): """ Example: >>> pass """ pass def simple_func1(): """ Example: >>> pass """ pass def test_parse_dynamic_calldefs(): """ CommandLine: python tests/test_dynamic.py test_parse_dynamic_calldefs """ def subfunc(): """ Example: >>> pass """ pass module = sys.modules[test_parse_dynamic_calldefs.__module__] modpath = module.__file__ calldefs = dynamic.parse_dynamic_calldefs(modpath) keys = [ '__doc__', 'SimpleClass', 'SimpleClass.method1', 'SimpleClass.method2', 'SimpleClass.method3', 'SimpleClass.method4', 'simple_func1', ] for key in keys: print('Check parsed key = {!r} in calldefs'.format(key)) assert key in calldefs keys = [ 'foobar', 'subfunc', 'submethod1', 'is_defined_by_module', 'TopLevelVisitor', 'SimpleClass.visit', ] for key in keys: print('Check parsed key = {!r} not in calldefs'.format(key)) assert key not in calldefs assert 'visit' in dir(SimpleClass) assert 'is_defined_by_module' in dir(module) assert 'TopLevelVisitor' in dir(module) def test_defined_by_module(): """ CommandLine: python tests/test_dynamic.py test_defined_by_module """ module = sys.modules[test_defined_by_module.__module__] instance = SimpleClass() items = [ SimpleClass, SimpleClass.method1, SimpleClass.method2, SimpleClass.method3, SimpleClass.method4, instance, instance.method1, instance.method2, instance.method4, instance.inst_attr, ] for item in items: flag = dynamic.is_defined_by_module(item, module) print('Checking {} is defined by {}'.format(item, module.__name__)) assert flag, '{} should be defined by {}'.format(item, module) items = [ sys, int, 0, 'foobar', module.__name__ ] for item in items: flag = dynamic.is_defined_by_module(item, module) print('Checking {} is not defined by {}'.format(item, module.__name__)) assert not flag, '{} should not be defined by {}'.format(item, module) import platform if platform.python_implementation() == 'PyPy': import pytest pytest.skip('ctypes for pypy') import _ctypes items = [ _ctypes.Array, _ctypes.CFuncPtr, _ctypes.CFuncPtr.restype, ] module = _ctypes for item in items: flag = dynamic.is_defined_by_module(item, module) print('Checking {} is defined by {}'.format(item, module.__name__)) assert flag, '{} should be defined by {}'.format(item, module) import inspect items = [ inspect.re, inspect.re.sub, inspect.re.enum, ] module = inspect for item in items: flag = dynamic.is_defined_by_module(item, module) print('Checking {} is not defined by {}'.format(item, module.__name__)) assert not flag, '{} should be not defined by {}'.format(item, module) def test_programatically_generated_docstrings(): """ Test that the "dynamic" analysis mode works on dynamically generated docstrings. """ from xdoctest import utils from xdoctest.utils.util_misc import TempModule temp = TempModule(utils.codeblock( ''' code = ">>> print('hello world')" def func1(): """ Example: {} """ func1.__doc__ = func1.__doc__.format(code) ''')) import xdoctest # auto wont pick up dynamic doctests by default # Although in the future it would be cool if it did result = xdoctest.doctest_module(temp.modpath, analysis='auto', command='all') assert result['n_total'] == 1 assert result['n_failed'] == 0 # but an explicit dynamic should pick these up result = xdoctest.doctest_module(temp.modpath, analysis='dynamic', command='all') assert result['n_passed'] == 1 assert result['n_failed'] == 0 # module = ub.import_module_from_path(temp.modpath) # assert module.func1.__doc__ is not None # list(xdoctest.core.parse_doctestables(temp.modpath, analysis='dynamic')) # xdoctest.core.DEBUG = 1 # calldefs = list(xdoctest.core.package_calldefs(temp.modpath, analysis='dynamic')) # calldefs = list(xdoctest.core.parse_calldefs(temp.modpath, analysis='dynamic')) # from xdoctest import dynamic_analysis # from xdoctest import static_analysis # static_analysis.parse_static_calldefs(fpath=temp.modpath) # node = dynamic_analysis.parse_dynamic_calldefs(temp.modpath)['func1'] # node = dynamic_analysis.parse_dynamic_calldefs(temp.modpath)['func2'] if __name__ == '__main__': """ CommandLine: python tests/test_dynamic.py pytest tests/test_dynamic.py """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_entry_point.py000066400000000000000000000066721505122333300226670ustar00rootroot00000000000000import sys import os import subprocess import pytest from xdoctest import utils def cmd(command): # simplified version of ub.cmd no fancy tee behavior proc = subprocess.Popen( command, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = proc.communicate() ret = proc.wait() info = { 'proc': proc, 'out': out, 'err': err, 'ret': ret, } return info def skip_if_not_installed(): # If xdoctest is not installed via `pip install -e` # then skip these tests because the entry point wont exist if not utils.is_modname_importable('xdoctest'): pytest.skip('Can only test entry points if xdoctest is installed.') def test_xdoc_console_script_location(): skip_if_not_installed() if sys.platform.startswith('freebsd'): pytest.skip('freebsd is minimal and might not have xdoctest on the path') elif sys.platform.startswith('win32'): pytest.skip() path = os.path.realpath(sys.executable) for i in range(4): path = os.path.dirname(path) print('path = {!r}'.format(path)) scriptdir = os.path.join(path, 'Scripts') if os.path.exists(scriptdir): break script_path = os.path.join(scriptdir, 'xdoctest.exe') assert os.path.exists(script_path) else: from shutil import which script_fpath = which('xdoctest') assert script_fpath is not None, ( 'xdoctest should be installed in the path in normal circumstances') script_fname = os.path.basename(script_fpath) assert script_fname.startswith('xdoctest') def test_xdoc_console_script_exec(): skip_if_not_installed() if sys.platform.startswith('freebsd'): pytest.skip('freebsd is minimal and might not have xdoctest on the path') elif sys.platform.startswith('win32'): path = os.path.realpath(sys.executable) for i in range(4): path = os.path.dirname(path) print('path = {!r}'.format(path)) scriptdir = os.path.join(path, 'Scripts') if os.path.exists(scriptdir): break info = cmd(os.path.join(scriptdir, 'xdoctest.exe')) else: info = cmd('xdoctest') print('info = {!r}'.format(info)) assert 'usage' in info['err'] def test_xdoc_cli_version(): """ CommandLine: python -m xdoctest -m ~/code/xdoctest/tests/test_entry_point.py test_xdoc_cli_version """ import sys if sys.platform.startswith('win32'): pytest.skip() import xdoctest from xdoctest import __main__ print('xdoctest = {!r}'.format(xdoctest)) print('__main__ = {!r}'.format(__main__)) retcode = __main__.main(argv=['--version']) print('retcode = {!r}'.format(retcode)) assert retcode == 0 import xdoctest print('xdoctest = {!r}'.format(xdoctest)) sys.executable try: import ubelt as ub except ImportError: info = cmd(sys.executable + ' -m xdoctest --version') else: info = ub.cmd(sys.executable + ' -m xdoctest --version') print('info = {!r}'.format(info)) print('xdoctest.__version__ = {!r}'.format(xdoctest.__version__)) assert xdoctest.__version__ in info['out'] if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/tests/test_entry_point.py """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_errors.py000066400000000000000000000147341505122333300216270ustar00rootroot00000000000000from os.path import join import warnings import pytest from xdoctest import runner from xdoctest import core from xdoctest import exceptions from xdoctest import utils from xdoctest.utils.util_misc import _run_case # def _check_syntaxerror_behavior(): # import ubelt as ub # source_block = ub.codeblock( # ''' # x = 3 # 3 = 5 # ''') # try: # compile(source_block, filename='', mode='exec') # except SyntaxError as ex1: # print('ex1.text = {!r}'.format(ex1.text)) # print('ex1.offset = {!r}'.format(ex1.offset)) # print('ex1.lineno = {!r}'.format(ex1.lineno)) # import ast # try: # pt = ast.parse(source_block) # except SyntaxError as ex2: # print('ex2.text = {!r}'.format(ex2.text)) # print('ex2.offset = {!r}'.format(ex2.offset)) # print('ex2.lineno = {!r}'.format(ex2.lineno)) # fpath = join(ub.ensure_app_cache_dir('xdoctest', 'test'), 'source.py') # ub.writeto(fpath, source_block) # try: # compile(source_block, filename=fpath, mode='exec') # except SyntaxError as ex2: # print('ex2.text = {!r}'.format(ex2.text)) # print('ex2.offset = {!r}'.format(ex2.offset)) # print('ex2.lineno = {!r}'.format(ex2.lineno)) # import tempfile # import ast # temp = tempfile.NamedTemporaryFile() # temp.file.write((source_block + '\n').encode('utf8')) # temp.file.seek(0) # try: # ast.parse(source_block, temp.name) # except SyntaxError as ex2: # print('ex2.text = {!r}'.format(ex2.text)) # print('ex2.offset = {!r}'.format(ex2.offset)) # print('ex2.lineno = {!r}'.format(ex2.lineno)) # raise def test_parse_syntax_error(): """ CommandLine: python tests/test_errors.py test_parse_syntax_error """ docstr = utils.codeblock( ''' Example: >>> x = 0 >>> 3 = 5 ''') info = {'callname': 'test_synerr', 'lineno': 42} # Eager parsing should cause no doctests with errors to be found # and warnings should be raised with warnings.catch_warnings(record=True) as f_warnlist: f_doctests = list(core.parse_docstr_examples(docstr, style='freeform', **info)) with warnings.catch_warnings(record=True) as g_warnlist: g_doctests = list(core.parse_docstr_examples(docstr, style='google', **info)) for w in g_warnlist: print(w.message) for w in f_warnlist: print(w.message) assert len(g_warnlist) == 1 assert len(f_warnlist) == 1 assert len(g_doctests) == 0 assert len(f_doctests) == 0 # Google style can find doctests with bad syntax, but parsing them # results in an error. g_doctests2 = list(core.parse_google_docstr_examples(docstr, eager_parse=False, **info)) assert len(g_doctests2) == 1 for example in g_doctests2: with pytest.raises(exceptions.DoctestParseError): example._parse() def test_runner_syntax_error(): """ python tests/test_errors.py test_runner_syntax_error pytest tests/test_errors.py -k test_runner_syntax_error xdoctest -m tests/test_errors.py test_runner_syntax_error """ source = utils.codeblock( r''' def demo_parsetime_syntax_error1(): """ Example: >>> from __future__ import print_function >>> print 'Parse-Time Syntax Error' """ def demo_parsetime_syntax_error2(): """ Example: >>> def bad_syntax() return for """ def demo_runtime_error(): """ Example: >>> print('Runtime Error {}'.format(5 / 0)) """ def demo_runtime_name_error(): """ Example: >>> print('Name Error {}'.format(foo)) """ def demo_runtime_warning(): """ Example: >>> import warnings >>> warnings.warn('in-code warning') """ ''') temp = utils.TempDir(persist=True) temp.ensure() dpath = temp.dpath modpath = join(dpath, 'demo_runner_syntax_error.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'all', argv=[''], style='freeform', verbose=1) print('CAPTURED [[[[[[[[') print(utils.indent(cap.text)) print(']]]]]]]] # CAPTURED') captext = cap.text assert '1 run-time warnings' in captext assert '2 parse-time warnings' in captext # Assert summary line assert '3 warnings' in captext assert '2 failed' in captext assert '1 passed' in captext def test_parse_doctset_error(): source = utils.codeblock( ''' def func_with_an_unparsable_google_docstr(a): """ This function will have an unparsable google docstr Args: a (int): a number Example: >>> a = "\\''' + '''n" >>> func(a) """ pass ''') text = _run_case(source, style='google') text = _run_case(source, style='freeform') del text def test_extract_got_exception(): """ Make a repr that fails CommandLine: xdoctest -m ~/code/xdoctest/tests/test_errors.py test_extract_got_exception """ source = utils.codeblock( ''' class MyObj: """ Example: >>> a = MyObj() >>> a you cant always get what you want """ def __repr__(self): raise Exception('this repr fails') ''') text = _run_case(source, style='google') assert 'ExtractGotReprException' in text if __name__ == '__main__': """ CommandLine: export PYTHONPATH=$PYTHONPATH:/home/joncrall/code/xdoctest/tests python ~/code/xdoctest/tests/test_errors.py pytest ~/code/xdoctest/tests/test_errors.py -s --verbose CommandLine: xdoctest -m ~/code/xdoctest/tests/test_errors.py test_extract_got_exception zero """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_import.py000066400000000000000000000000461505122333300216140ustar00rootroot00000000000000def test_import(): import xdoctestErotemic-xdoctest-fac8308/tests/test_limitations.py000066400000000000000000000022441505122333300226400ustar00rootroot00000000000000""" Tests in this file are more demonstrations of the limitations of the static analysis doctest parser. """ from xdoctest import utils from xdoctest.utils.util_misc import _run_case def test_pathological_property_case(): """ This case actually wont error, but it displays a limitation of static analysis. We trick the doctest node parser into thinking there is a setter property when there really isn't. """ text = _run_case(utils.codeblock( ''' def property(x): class Foo(): def setter(self, x): return x return Foo() class Test: @property def test(self): """ Example: >>> print('not really a getter') """ return 3.14 @test.setter def test(self, s): """ Example: >>> print('not really a setter') """ pass ''')) # If there was a way to make this case fail, that would be ok assert 'Test.test:0' in text # assert 'Test.test.fset:0' in text Erotemic-xdoctest-fac8308/tests/test_notebook.py000066400000000000000000000056141505122333300221300ustar00rootroot00000000000000import pytest import sys from os.path import join, exists, dirname try: from packaging.version import parse as LooseVersion except ImportError: from distutils.version import LooseVersion PY_VERSION = LooseVersion('{}.{}'.format(*sys.version_info[0:2])) IS_MODERN_PYTHON = PY_VERSION > LooseVersion('3.4') def skip_notebook_tests_if_unsupported(): if not IS_MODERN_PYTHON: pytest.skip('jupyter support is only for modern python versions') try: import IPython # NOQA import nbconvert # NOQA import nbformat # NOQA import platform if platform.python_implementation() == 'PyPy': # In xdoctest <= 0.15.0 (~ 2021-01-01) this didn't cause an issue # But I think there was a jupyter update that broke it. # PyPy + Jupyter is currently very niche and I don't have the time # to debug properly, so I'm just turning off these tests. raise Exception except Exception: pytest.skip('Missing jupyter') def cmd(command): # simplified version of ub.cmd no fancy tee behavior import subprocess proc = subprocess.Popen( command, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = proc.communicate() ret = proc.wait() info = { 'proc': proc, 'out': out, 'test_doctest_in_notebook.ipynberr': err, 'ret': ret, } return info def demodata_notebook_fpath(): try: testdir = dirname(__file__) except NameError: # Hack for dev CLI usage import os testdir = os.path.expandvars('$HOME/code/xdoctest/tests/') assert exists(testdir), 'assuming a specific dev environment' notebook_fpath = join(testdir, "notebook_with_doctests.ipynb") return notebook_fpath def test_xdoctest_inside_notebook(): """ xdoctest ~/code/xdoctest/tests/test_notebook.py test_xdoctest_inside_notebook xdoctest tests/test_notebook.py test_xdoctest_inside_notebook xdoctest notebook_with_doctests.ipynb """ # How to run Jupyter from Python # https://nbconvert.readthedocs.io/en/latest/execute_api.html skip_notebook_tests_if_unsupported() notebook_fpath = demodata_notebook_fpath() from xdoctest.utils import util_notebook nb, resources = util_notebook.execute_notebook(notebook_fpath, verbose=3) last_cell = nb['cells'][-1] text = last_cell['outputs'][0]['text'] if '3 / 3 passed' not in text: import warnings warnings.warn('test_xdoctest_inside_notebook might fail due to io issues') def test_xdoctest_outside_notebook(): skip_notebook_tests_if_unsupported() if sys.platform.startswith('win32'): pytest.skip() notebook_fpath = demodata_notebook_fpath() info = cmd(sys.executable + ' -m xdoctest ' + notebook_fpath) text = info['out'] assert '3 / 3 passed' in text Erotemic-xdoctest-fac8308/tests/test_parser.py000066400000000000000000000461671505122333300216140ustar00rootroot00000000000000import pytest from xdoctest import parser from xdoctest import utils from xdoctest import exceptions def test_final_eval_exec(): """ Ensure that if the line before a want is able to be evaled, it is so we can compare its value to the want value. CommandLine: xdoctest -m ~/code/xdoctest/tests/test_parser.py test_final_eval_exec """ string = utils.codeblock( ''' >>> x = 2 >>> x + 1 1 ''') self = parser.DoctestParser() parts = self.parse(string) DEBUG = 0 if DEBUG: print(string) print([p.compile_mode for p in parts]) assert [p.compile_mode for p in parts] == ['exec', 'eval'] string = utils.codeblock( ''' >>> x = 2 >>> x + 1 ''') self = parser.DoctestParser() parts = self.parse(string) if DEBUG: print(string) print([p.compile_mode for p in parts]) assert [p.compile_mode for p in parts] == ['exec'] string = utils.codeblock( r''' >>> x = 2 >>> x += 3 >>> """ ... foobar ... """ '\nfoobar\n' ''') self = parser.DoctestParser() parts = self.parse(string) if DEBUG: print(string) print([p.compile_mode for p in parts]) assert [p.compile_mode for p in parts] == ['exec', 'single'] string = utils.codeblock( r''' >>> i = 0 >>> 0 / i 2 ''') self = parser.DoctestParser() parts = self.parse(string) if DEBUG: print(string) print([p.compile_mode for p in parts]) assert [p.compile_mode for p in parts] == ['exec', 'eval'] string = utils.codeblock( r''' >>> if True: ... 2 2 ''') self = parser.DoctestParser() parts = self.parse(string) if DEBUG: print(string) print([p.compile_mode for p in parts]) assert [p.compile_mode for p in parts] == ['single'] def test_compile_mode_print(): string = utils.codeblock( r''' >>> x = 2 >>> x += 3 >>> print('foo') foo ''') self = parser.DoctestParser() parts = self.parse(string) assert [p.compile_mode for p in parts] == ['exec', 'eval'] def test_label_lines(): string = utils.codeblock( r''' >>> i = 0 >>> 0 / i 2 ''') self = parser.DoctestParser() labeled = self._label_docsrc_lines(string) assert labeled == [ ('dsrc', '>>> i = 0'), ('dsrc', '>>> 0 / i'), ('want', '2') ] def test_label_indented_lines(): string = ''' text >>> dsrc1() want >>> dsrc2() >>> cont( ... a=b) ... dsrc >>> dsrc3(): ... a ... b = """ multiline """ want text ... still text >>> "now its a doctest" text ''' self = parser.DoctestParser() labeled = self._label_docsrc_lines(string) # import sys # print('EXIT NOW') # sys.exit(1) expected = [ ('text', ''), ('text', ' text'), ('dsrc', ' >>> dsrc1()'), ('want', ' want'), ('text', ''), ('dsrc', ' >>> dsrc2()'), ('dsrc', ' >>> cont('), ('dcnt', ' ... a=b)'), ('dcnt', ' ... dsrc'), ('dsrc', ' >>> dsrc3():'), ('dcnt', ' ... a'), ('dcnt', ' ... b = """'), ('dcnt', ' multiline'), ('dcnt', ' """'), ('want', ' want'), ('text', ''), ('text', ' text'), ('text', ' ... still text'), ('dsrc', ' >>> "now its a doctest"'), ('text', ''), ('text', ' text'), ('text', ' '), # FIXME: weird that this space has an indent ] if labeled != expected: try: import itertools as it for got, want in it.zip_longest(labeled, expected): if got != want: print(utils.color_text('GOT = {!r}'.format(got), 'red')) print(utils.color_text('WANT = {!r}'.format(want), 'blue')) else: print('PASS = {!r}'.format(got)) except ImportError: pass raise AssertionError assert labeled == expected def test_ps1_linenos_1(): """ Test we can find the line numbers for every "evaluatable" statement """ source_lines = utils.codeblock( ''' >>> x = 2 >>> x + 1 1 ''').split('\n')[:-1] self = parser.DoctestParser() linenos, mode_hint = self._locate_ps1_linenos(source_lines) assert mode_hint == 'eval' assert linenos == [0, 1] def test_ps1_linenos_2(): source_lines = utils.codeblock( ''' >>> x = """ x = 2 """ >>> print(x.strip() + '1') x = 21 ''').split('\n')[:-1] self = parser.DoctestParser() linenos, mode_hint = self._locate_ps1_linenos(source_lines) assert mode_hint == 'eval' assert linenos == [0, 3] def test_ps1_linenos_3(): source_lines = utils.codeblock( ''' >>> x = """ x = 2 """ >>> y = (x.strip() + '1') 'x = 21' ''').split('\n')[:-1] self = parser.DoctestParser() linenos, mode_hint = self._locate_ps1_linenos(source_lines) assert mode_hint == 'exec' assert linenos == [0, 3] def test_ps1_linenos_4(): source_lines = utils.codeblock( ''' >>> x = """ x = 2 """ >>> def foo(): ... return 5 >>> ms1 = """ ... multistring2 ... multistring2 ... """ >>> ms2 = """ ... multistring2 ... multistring2 ... """ >>> x = sum([ >>> foo() >>> ]) >>> y = len(ms1) + len(ms2) >>> z = ( >>> x + y >>> ) >>> z 59 ''').split('\n')[:-1] self = parser.DoctestParser() linenos, mode_hint = self._locate_ps1_linenos(source_lines) assert mode_hint == 'eval' assert linenos == [0, 3, 5, 9, 13, 16, 17, 20] def test_retain_source(): """ """ source = utils.codeblock( ''' >>> x = 2 >>> print("foo") foo ''') source_lines = source.split('\n')[:-1] self = parser.DoctestParser() linenos, mode_hint = self._locate_ps1_linenos(source_lines) assert mode_hint == 'eval' assert linenos == [0, 1] p1, p2 = self.parse(source) assert p1.source == 'x = 2' assert p2.source == 'print("foo")' def test_package_string_tup(): """ pytest tests/test_parser.py::test_package_string_tup """ raw_source_lines = ['>>> "string"'] raw_want_lines = ['string'] self = parser.DoctestParser() parts = list(self._package_chunk(raw_source_lines, raw_want_lines)) assert len(parts) == 1, 'should only want one string' def test_simulate_repl(): """ pytest tests/test_parser.py::test_package_string_tup """ string = utils.codeblock( ''' >>> x = 1 >>> x = 2 >>> x = 3 ''') self = parser.DoctestParser() self.simulate_repl = False assert len(self.parse(string)) == 1 self.simulate_repl = True assert len(self.parse(string)) == 3 def test_parse_multi_want(): string = utils.codeblock( ''' >>> x = 2 >>> x 2 >>> 'string' 'string' >>> print('string') string ''') self = parser.DoctestParser() parts = self.parse(string) self._label_docsrc_lines(string) assert parts[2].source == "'string'" assert len(parts) == 4 def test_parse_eval_nowant(): string = utils.codeblock( ''' >>> a = 1 >>> 1 / 0 ''') self = parser.DoctestParser() parts = self.parse(string) raw_source_lines = string.split('\n')[:] ps1_linenos, mode_hint = self._locate_ps1_linenos(raw_source_lines) assert ps1_linenos == [0, 1] assert mode_hint == 'eval' # Only one part because there is no want assert len(parts) == 1 def test_parse_eval_single_want(): string = utils.codeblock( ''' >>> a = 1 >>> 1 / 0 We have a want ''') self = parser.DoctestParser() parts = self.parse(string) raw_source_lines = string.split('\n')[:-1] ps1_linenos, mode_hint = self._locate_ps1_linenos(raw_source_lines) assert ps1_linenos == [0, 1] assert mode_hint == 'eval' # Only one part because there is no want assert len(parts) == 2 def test_parse_comment(): string = utils.codeblock( ''' >>> # nothing ''') self = parser.DoctestParser() labeled = self._label_docsrc_lines(string) assert labeled == [('dsrc', '>>> # nothing')] source_lines = string.split('\n')[:] linenos, mode_hint = self._locate_ps1_linenos(source_lines) parts = self.parse(string) assert parts[0].source.strip().startswith('#') def test_text_after_want(): string = utils.codeblock(''' Example: >>> dsrc() want just some test ''') self = parser.DoctestParser() labeled = self._label_docsrc_lines(string) expected = [ ('text', 'Example:'), ('dsrc', ' >>> dsrc()'), ('want', ' want'), ('text', 'just some test'), ] assert labeled == expected def test_want_ellipse_with_space(): string = utils.codeblock(''' Example: >>> dsrc() ... ''') # Add an extra space after the ellipses to be clear what we are testing # and because my editor automatically removes it when I try to save the # file ¯\_(ツ)_/¯ string = string + ' ' self = parser.DoctestParser() labeled = self._label_docsrc_lines(string) expected = [ ('text', 'Example:'), ('dsrc', ' >>> dsrc()'), ('want', ' ... '), ] assert labeled == expected def test_syntax_error(): string = utils.codeblock(''' Example: >>> 03 = dsrc() ''') self = parser.DoctestParser() with pytest.raises(exceptions.DoctestParseError): self.parse(string) def test_nonbalanced_statement(): """ xdoctest ~/code/xdoctest/tests/test_parser.py test_nonbalanced_statement from xdoctest import static_analysis as static lines = ['x = ['] static.is_balanced_statement(lines, only_tokens=True) """ string = utils.codeblock( ''' >>> x = [ # ] this bracket is to make my editor happy and is does not effect the test ''').splitlines()[0] self = parser.DoctestParser() with pytest.raises(exceptions.DoctestParseError) as exc_info: self.parse(string) msg = exc_info.value.orig_ex.msg.lower() assert msg.startswith('ill-formed doctest'.lower()) def test_bad_indent(): """ CommandLine: python tests/test_parser.py test_bad_indent """ string = utils.codeblock( ''' Example: >>> x = [ # ] bad want indent ''') self = parser.DoctestParser() with pytest.raises(exceptions.DoctestParseError) as exc_info: self.parse(string) msg = exc_info.value.orig_ex.msg.lower() assert msg.startswith('Bad indentation in doctest'.lower()) def test_part_nice_no_lineoff(): from xdoctest import doctest_part self = doctest_part.DoctestPart([], [], None) assert str(self) == '' def test_repl_oneline(): string = utils.codeblock( ''' >>> x = 1 ''') self = parser.DoctestParser(simulate_repl=True) parts = self.parse(string) assert [p.source for p in parts] == ['x = 1'] def test_repl_twoline(): string = utils.codeblock( ''' >>> x = 1 >>> x = 2 ''') self = parser.DoctestParser(simulate_repl=True) parts = self.parse(string) assert [p.source for p in parts] == ['x = 1', 'x = 2'] def test_repl_comment_in_string(): source_lines = ['>>> x = """', ' # comment in a string', ' """'] self = parser.DoctestParser() assert self._locate_ps1_linenos(source_lines) == ([0], 'exec') source_lines = [ '>>> x = """', ' # comment in a string', ' """', '>>> x = """', ' # comment in a string', ' """', ] self = parser.DoctestParser() assert self._locate_ps1_linenos(source_lines) == ([0, 3], 'exec') def test_inline_directive(): """ python ~/code/xdoctest/tests/test_parser.py test_inline_directive """ string = utils.codeblock( ''' >>> # doctest: +SKIP >>> func1(* >>> [i for i in range(10)]) >>> # not a directive >>> func2( # not a directive >>> a=b >>> ) >>> func3() # xdoctest: +SKIP >>> func4() want1 >>> func5() # xdoctest: +SKIP want1 >>> # xdoctest: +SKIP >>> func6() >>> func7(a=b, >>> c=d) # xdoctest: +SKIP >>> # xdoctest: +SKIP >>> func8(' # doctest: not a directive') >>> func9(""" # doctest: still not a directive """) finalwant ''') # source_lines = string.splitlines() self = parser.DoctestParser() # [0, 1, 3, 4, 7, 8, 10, 11, 12] # assert ps1_linenos == [0, 2, 5, 6, 8, 9, 10] parts = self.parse(string) # assert len(parts) == for part in parts: print('----') print(part.source) print('----') try: print(parts) except ImportError: pass # TODO: finish me def test_block_directive_nowant1(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_nowant1 """ string = utils.codeblock( ''' >>> # doctest: +SKIP >>> func1() >>> func2() ''') # source_lines = string.splitlines() self = parser.DoctestParser() parts = self.parse(string) print('----') for part in parts: print(part.source) print('----') try: print(parts) except ImportError: pass assert len(parts) == 1 def test_block_directive_nowant2(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_nowant """ string = utils.codeblock( ''' >>> # doctest: +SKIP >>> func1() >>> func2() >>> # doctest: +SKIP >>> func1() >>> func2() ''') # source_lines = string.splitlines() self = parser.DoctestParser() parts = self.parse(string) # TODO: finish me assert len(parts) == 2 def test_block_directive_want1_assign(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_want1 """ string = utils.codeblock( ''' >>> # doctest: +SKIP >>> func1() >>> _ = func2() # assign this line so we dont break it off for eval want ''') self = parser.DoctestParser() parts = self.parse(string) print('----') for part in parts: print(part.source) print('----') try: print(parts) except ImportError: pass assert len(parts) == 1 def test_block_directive_want1_eval(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_want1 """ string = utils.codeblock( ''' >>> # doctest: +SKIP >>> func1() >>> func2() # eval this line so it is broken off want ''') self = parser.DoctestParser() parts = self.parse(string) assert len(parts) == 2 def test_block_directive_want2_assign(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_want2 """ string = utils.codeblock( ''' >>> func1() >>> # doctest: +SKIP >>> func2() >>> _ = func3() want ''') self = parser.DoctestParser() parts = self.parse(string) assert len(parts) == 2 def test_block_directive_want2_eval(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_want2_eval """ string = utils.codeblock( ''' >>> func1() >>> # doctest: +SKIP >>> func2() >>> func3() want ''') self = parser.DoctestParser() parts = self.parse(string) print('----') for part in parts: print(part.source) print('----') try: print(parts) except ImportError: pass assert len(parts) == 3 def test_block_directive_want2_eval2(): """ python ~/code/xdoctest/tests/test_parser.py test_block_directive_want2_eval """ string = utils.codeblock( ''' >>> func1() >>> func1() >>> # doctest: +SKIP >>> func2() >>> func2() >>> # doctest: +SKIP >>> func3() >>> func3() >>> func3() >>> func4() want ''') self = parser.DoctestParser() parts = self.parse(string) assert len(parts) == 4 def test_gh_issue_25_parsing_failure(): string = utils.codeblock( ''' >>> _, o = 0, 1 >>> A = B = C = D = 1 >>> cc_mask = [ # Y >>> [ _, _, _, o, _, _, ], # 0 >>> [ _, _, o, o, o, _, ], # 1 >>> [ _, o, o, o, o, o, ], # 2 >>> [ o, o, o, o, o, _, ], # 3 >>> [ _, o, o, o, _, _, ], # 4 >>> [ _, _, o, o, _, _, ], # 5 >>> # X: 0 1 2 3 4 5 6 >>> ] >>> # a regular comment >>> print(cc_mask) ''') source_lines = string.splitlines() self = parser.DoctestParser() ps1_linenos = self._locate_ps1_linenos(source_lines)[0] assert ps1_linenos == [0, 1, 2, 11, 12] parts = self.parse(string) assert len(parts) == 1 def test_parser_with_type_annot(): string = utils.codeblock( ''' >>> def foo(x: str) -> None: >>> ... ''') source_lines = string.splitlines() self = parser.DoctestParser() ps1_linenos = self._locate_ps1_linenos(source_lines)[0] assert ps1_linenos == [0] parts = self.parse(string) assert len(parts) == 1 def test_parse_tabs(): tab = '\t' string = utils.codeblock( f''' >>> text = "tab{tab}sep{tab}val" ''') self = parser.DoctestParser() parts = self.parse(string) doctest_part = parts[0] assert tab in doctest_part.source if __name__ == '__main__': """ CommandLine: python ~/code/xdoctest/tests/test_parser.py --help python ~/code/xdoctest/tests/test_parser.py test_inline_directive python ~/code/xdoctest/tests/test_parser.py zero-all python ~/code/xdoctest/tests/test_parser.py test_gh_issue_25_parsing_failure pytest tests/test_parser.py -vv """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_plugin.py000066400000000000000000001667551505122333300216240ustar00rootroot00000000000000""" Adapted from the original `pytest/tests/test_doctest.py` module at: https://github.com/pytest-dev/pytest https://github.com/pytest-dev/pytest/blob/main/tests/test_doctest.py """ import shlex import sys import _pytest._code from xdoctest.plugin import XDoctestItem, XDoctestModule, XDoctestTextfile from xdoctest import utils import pytest try: from packaging.version import parse as LooseVersion except ImportError: from distutils.version import LooseVersion IS_GE_PY306 = sys.version_info[:2] >= (3, 6) IS_GE_PY307 = sys.version_info[:2] >= (3, 7) MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if IS_GE_PY306 else 'ImportError' EXTRA_ARGS = ['-p', 'pytester', '-p', 'no:doctest', '--xdoctest-nocolor'] # Behavior has changed to not test text files by default OLD_TEXT_ARGS = ['--xdoc-glob=*.txt'] # def print(text): # """ Hack so we can get stdout when debugging the plugin file """ # import os # fpath = os.path.expanduser('~/plugin.stdout.txt') # with open(fpath, 'a') as file: # file.write(str(text) + '\n') def explicit_testdir(): r""" Explicitly constructs a testdir for use in IPython development Note used by any tests. # https://xr.gs/2017/07/pytest-dynamic-runtime-fixtures-python3/ https://stackoverflow.com/questions/45970572/how-to-get-a-pytest-fixture-interactively Ignore: python -c codeblock " import subprocess, grp import imp, inspect, textwrap, pprint, json, tempfile, string, lzma, bz2, shutil import glob, time, struct, bisect, pdb, platform, atexit, shlex, import sys s1 = set(sys.modules) import pytest s2 = set(sys.modules) print('\n'.join(sorted(s2 - s1))) " Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * """ # modpath = _modname_to_modpath_backup('pytest') # import pytest # NOQA # import sys # if 'pytest' in sys.modules: # for k in list(sys.modules): # if k.startswith(('_pytest.', 'py.')): # del sys.modules[k] # elif k in {'_pytest', 'py'}: # del sys.modules[k] # import _pytest # import _pytest.config # import _pytest.main # import _pytest.tmpdir # import _pytest.fixtures # import _pytest.runner # import _pytest.python # _pytest.config._preloadplugins() # to populate pytest.* namespace so help(pytest) works import _pytest config = _pytest.config._prepareconfig(['-s'], plugins=['pytester']) session = _pytest.main.Session(config) _pytest.tmpdir.pytest_configure(config) _pytest.fixtures.pytest_sessionstart(session) _pytest.runner.pytest_sessionstart(session) def func(testdir): pass parent = _pytest.python.Module('parent', config=config, session=session, nodeid='myparent') function = _pytest.python.Function( 'func', parent, callobj=func, config=config, session=session, originalname='func') # Under the hood this does: # > function._request._fillfixtures() # > which does # > self = function._request # > argname = 'tmpdir_factory' # > self.getfixturevalue(argname) # > self._get_active_fixturedef(argname) # > self._getnextfixturedef(argname) # > fixturedefs = self._arg2fixturedefs.get(argname, None) # > self._compute_fixture_value(fixturedefs[0]) if False: # This used to work, but now it doesn't _pytest.fixtures.fillfixtures(function) testdir = function.funcargs['testdir'] else: # Now this is the hack self = function._request # argname = 'tmpdir_factory' argname = 'testdir' fixturedef = self._arg2fixturedefs.get(argname, None)[0] fixturedef.scope = 'function' self._compute_fixture_value(fixturedef) testdir = fixturedef.cached_result[0] # from _pytest.compat import _setup_collect_fakemodule # _setup_collect_fakemodule() return testdir class TestXDoctestActivation: @pytest.mark.parametrize(('flags', 'load'), [('', False), ('--xdoc', True), ('--doctest-modules', False), ('--doctest-modules --xdoctest', True)]) def test_xdoctest_cli_activation(self, request, flags, load): """ Activate `xdoctest` via command-line arguments. CommandLine: pytest tests/test_plugin.py::TestXDoctestActivation::\ test_xdoctest_cli_activation """ self._check_activation(request, flags, load) def test_xdoctest_config_activation(self, request): """ Activate `xdoctest` via config file. CommandLine: pytest tests/test_plugin.py::TestXDoctestActivation::\ test_xdoctest_config_activation """ self._get_tester(request).makeini(''' [pytest] addopts = '--xdoctest' ''') self._check_activation(request, '', True) def test_xdoctest_explicit_suppression(self, request): """ Deactivate `xdoctest` via explicitly unloading the plugin on the command line. CommandLine: pytest tests/test_plugin.py::TestXDoctestActivation::\ test_xdoctest_explicit_suppression """ pdt_namespace_before = self._get_pytest_doctest_module_dict() try: with pytest.raises(pytest.UsageError): # Can't parse the `--xdoc` flag with `xdoctest` disabled self._check_activation(request, '--xdoc -p no:xdoctest', False) finally: # Check that `_pytest.doctest` is untouched pdt_namespace_after = self._get_pytest_doctest_module_dict() assert pdt_namespace_before == pdt_namespace_after def _check_activation(self, request, flags, load): """ Check that if :py:mod:`xdoctest.plugin` is ``load``-ed, :py:mod:`_pytest.doctest` is unloaded but otherwise untouched. """ pdt_namespace_before = self._get_pytest_doctest_module_dict() try: config = (self ._get_tester(request) .parseconfigure(*shlex.split(flags))) manager = config.pluginmanager # When `--xdoctest` is set, it unsets other doctest plugins if load: assert manager.get_plugin('doctest') is None finally: # Also check that `_pytest.doctest` is untouched pdt_namespace_after = self._get_pytest_doctest_module_dict() assert pdt_namespace_before == pdt_namespace_after @staticmethod def _get_tester(request): try: from pytest import FixtureLookupError except ImportError: # Version < 6.0 from _pytest.fixtures import FixtureLookupError try: return request.getfixturevalue('pytester') except FixtureLookupError: return request.getfixturevalue('testdir') @staticmethod def _get_pytest_doctest_module_dict(): from _pytest import doctest return dict(vars(doctest)) class TestXDoctest: def test_collect_testtextfile(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_collect_testtextfile """ w = testdir.maketxtfile(whatever="") checkfile = testdir.maketxtfile(test_something=""" alskdjalsdk >>> i = 5 >>> i-1 4 """) for x in (testdir.tmpdir, checkfile): items, reprec = testdir.inline_genitems(x, '--xdoc-glob', '*.txt', *EXTRA_ARGS) assert len(items) == 1 assert isinstance(items[0], XDoctestItem) assert isinstance(items[0].parent, XDoctestTextfile) # Empty file has no items. items, reprec = testdir.inline_genitems(w) assert len(items) == 0 def test_collect_module_empty(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_collect_module_empty """ path = testdir.makepyfile(whatever="#") for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--xdoctest-modules') assert len(items) == 0 def test_simple_doctestfile(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_simple_doctestfile Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() """ p = testdir.maketxtfile(test_doc=""" >>> x = 1 >>> x == 1 False """) reprec = testdir.inline_run(p, *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(failed=1) def test_new_pattern(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_new_pattern """ p = testdir.maketxtfile(xdoc=""" >>> x = 1 >>> x == 1 False """) reprec = testdir.inline_run(p, "--xdoctest-glob=x*.txt", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(failed=1) def test_multiple_patterns(self, testdir): """Test support for multiple --xdoctest-glob arguments (#1255). CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_multiple_patterns """ testdir.maketxtfile(xdoc=""" >>> 1 1 """) testdir.makefile('.foo', test=""" >>> 1 1 """) testdir.maketxtfile(test_normal=""" >>> 1 1 """) expected = set(['xdoc.txt', 'test.foo', 'test_normal.txt']) assert set(x.basename for x in testdir.tmpdir.listdir()) == expected args = ["--xdoctest-glob=xdoc*.txt", "--xdoctest-glob=*.foo", '-s'] result = testdir.runpytest(*(args + EXTRA_ARGS)) result.stdout.fnmatch_lines([ '*test.foo *', '*xdoc.txt *', '*2 passed*', ]) result = testdir.runpytest(*(EXTRA_ARGS + ['--xdoc-glob=test_normal.txt'])) result.stdout.fnmatch_lines([ '*test_normal.txt*', '*1 passed*', ]) if LooseVersion(pytest.__version__) < LooseVersion('6.2.1'): @pytest.mark.parametrize( ' test_string, encoding', [ (u'foo', 'ascii'), (u'öäü', 'latin1'), (u'öäü', 'utf-8') ] ) def test_encoding(self, testdir, test_string, encoding): """Test support for xdoctest_encoding ini option. CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_encoding -s -v """ testdir.makeini(""" [pytest] xdoctest_encoding={0} """.format(encoding)) doctest = u""" >>> u"{0}" {1} """.format(test_string, repr(test_string)) print(doctest) testdir._makefile(".txt", [doctest], {}, encoding=encoding) result = testdir.runpytest("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines([ '*1 passed*', ]) else: @pytest.mark.parametrize( " test_string, encoding", [("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")], ) def test_encoding(self, pytester, test_string, encoding): """Test support for doctest_encoding ini option.""" pytester.makeini( """ [pytest] xdoctest_encoding={} """.format( encoding ) ) doctest = """ >>> "{}" {} """.format( test_string, repr(test_string) ) fn = pytester.path / "test_encoding.txt" fn.write_text(doctest, encoding=encoding) result = pytester.runpytest("--xdoctest", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines(["*1 passed*"]) def test_xdoctest_options(self, testdir): """Test support for xdoctest_encoding ini option. CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_xdoctest_options """ # Add command line that skips all doctests by default testdir.makeini(''' [pytest] addopts= --xdoc-options=SKIP ''') p = testdir.makepyfile(''' def add_one(x): """ >>> add_one(1) 2 """ return x + 1 ''') reprec = testdir.inline_run(p, "--xdoctest-modules", *EXTRA_ARGS) reprec.assertoutcome(skipped=1, failed=0, passed=0) def test_doctest_unexpected_exception(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_unexpected_exception Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() >>> self = TestXDoctest() >>> self.test_doctest_unexpected_exception(testdir) """ # import sys # try: # i = 0 # 0 / i # except Exception as ex: # exc_info = sys.exc_info() # import traceback # traceback.format_exception(*exc_info) testdir.maketxtfile(""" >>> i = 0 >>> 0 / i 2 """) result = testdir.runpytest("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) # print('') # print('\n'.join(result.stdout.lines)) # print('') result.stdout.fnmatch_lines([ "*>>> i = 0*", "*>>> 0 / i*", ]) # result.stdout.fnmatch_lines([ # "*unexpected_exception*", # "*>>> i = 0*", # "*>>> 0 / i*", # "*FAILED*ZeroDivision*", # ]) def test_doctest_property_lineno(self, testdir): """ REPLACES: test_doctest_linedata_missing REASON: Static parsing means we do know this line number. CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_property_lineno -v -s """ testdir.tmpdir.join('hello.py').write(_pytest._code.Source(utils.codeblock( """ class Fun: @property def test(self): ''' >>> a = 1 >>> 1 / 0 ''' """))) result = testdir.runpytest("--xdoctest-modules", *EXTRA_ARGS) print('\n'.join(result.stdout.lines)) result.stdout.fnmatch_lines([ "*REASON: ZeroDivisionError*", '*line 2*', '*line 6*', "*1 >>> a = 1*", "*2 >>> 1 / 0*", "*ZeroDivision*", "*1 failed*", ]) def test_doctest_property_lineno_freeform(self, testdir): """ REPLACES: test_doctest_linedata_missing REASON: Static parsing means we do know this line number. CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_property_lineno_freeform -v -s """ testdir.tmpdir.join('hello.py').write(_pytest._code.Source(utils.codeblock( """ class Fun: @property def test(self): ''' one line docs Example: >>> a = 1 >>> 1 / 0 ''' """))) result = testdir.runpytest("--xdoctest-modules", "--xdoc-style=freeform", *EXTRA_ARGS) print('\n'.join(result.stdout.lines)) result.stdout.fnmatch_lines([ "* REASON: ZeroDivisionError", '*line 2*', '*line 9*', "*1 >>> a = 1*", "*2 >>> 1 / 0*", "*ZeroDivision*", "*1 failed*", ]) def test_doctest_property_lineno_google(self, testdir): """ REPLACES: test_doctest_linedata_missing REASON: Static parsing means we do know this line number. CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_property_lineno_google -v -s """ testdir.tmpdir.join('hello.py').write(_pytest._code.Source(utils.codeblock( """ class Fun: @property def test(self): ''' one line docs Example: >>> a = 1 >>> 1 / 0 ''' """))) result = testdir.runpytest("--xdoctest-modules", "--xdoc-style=google", *EXTRA_ARGS) print('\n'.join(result.stdout.lines)) result.stdout.fnmatch_lines([ "* REASON: ZeroDivisionError", '*line 2*', '*line 9*', "*1 >>> a = 1*", "*2 >>> 1 / 0*", "*ZeroDivision*", "*1 failed*", ]) def test_doctest_property_lineno_google_v2(self, testdir): """ REPLACES: test_doctest_linedata_missing REASON: Static parsing means we do know this line number. At one point in xdoctest history this test failed while the other version passed CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_property_lineno_google_v2 -v -s """ testdir.tmpdir.join('hello.py').write(_pytest._code.Source(utils.codeblock( """ class Fun: @property def test(self): ''' Example: >>> a = 1 >>> 1 / 0 ''' """))) result = testdir.runpytest("--xdoctest-modules", "--xdoc-style=google", *EXTRA_ARGS) print('\n'.join(result.stdout.lines)) result.stdout.fnmatch_lines([ "* REASON: ZeroDivisionError", '*line 3*', '*line 8*', "*2 >>> a = 1*", "*3 >>> 1 / 0*", "*ZeroDivision*", "*1 failed*", ]) def test_docstring_show_entire_doctest(self, testdir): """Test that we show the entire doctest when there is a failure REPLACES: test_docstring_context_around_error REPLACES: test_docstring_context_around_error # XDOCTEST DOES NOT SHOW NON-SOURCE CONTEXT CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_docstring_show_entire_doctest """ testdir.makepyfile(utils.codeblock( ''' def foo(): """ Example: >>> x = 4 >>> x = 5 + x >>> x = 6 + x >>> x = 7 + x >>> x 22 >>> x = 8 + x >>> x = 9 + x >>> x = 10 + x >>> x = 11 + x >>> x = 12 + x >>> x 42 text-line-after """ ''')) result = testdir.runpytest('--xdoctest-modules', *EXTRA_ARGS) result.stdout.fnmatch_lines([ '* 1 >>> x = 4*', '* 2 >>> x = 5 + x*', '* 3 >>> x = 6 + x*', '* 4 >>> x = 7 + x*', '* 5 >>> x*', '* 7 >>> x = 8 + x*', '* 8 >>> x = 9 + x*', '* 9 >>> x = 10 + x*', '*10 >>> x = 11 + x*', '*11 >>> x = 12 + x*', '*12 >>> x*', 'Expected:', ' 42', 'Got:', ' 72', ]) # non-source lines should be trimmed out assert 'Example:' not in result.stdout.str() assert 'text-line-after' not in result.stdout.str() def test_doctest_unex_importerror_only_txt(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_unex_importerror_only_txt """ testdir.maketxtfile(""" >>> import asdalsdkjaslkdjasd """) result = testdir.runpytest("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) # xdoctest is never executed because of error during hello.py collection result.stdout.fnmatch_lines([ "*>>> import asdals*", "*{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR), ]) def test_doctest_unex_importerror_with_module(self, testdir): """ CHANGES: No longer fails during collection because we're doing static-parsing baby! CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_unex_importerror_with_module """ testdir.tmpdir.join("hello.py").write(_pytest._code.Source(""" import asdalsdkjaslkdjasd """)) testdir.maketxtfile(""" >>> import hello """) # because python is not started from this dir, it cant find the hello # module in the temporary dir without adding it to the path import os import sys cwd = os.getcwd() sys.path.append(cwd) result = testdir.runpytest("--xdoctest-modules", "-s", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines([ '*1 >>> import hello*', "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR), # "*Interrupted: 1 errors during collection*", ]) sys.path.pop() @pytest.mark.skip('pytest 3.7.0 broke this. Not sure why') def test_doctestmodule_external_and_issue116(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctestmodule_external_and_issue116 Ignore: cd ~/code/xdoctest/tests/data pytest --xdoctest-modules -p pytester -p no:doctest --xdoctest-nocolor pip install pytest==3.6.3 pytest tests/test_plugin.py::TestXDoctest::test_doctestmodule_external_and_issue116 pip install pytest==3.6.4 pytest tests/test_plugin.py::TestXDoctest::test_doctestmodule_external_and_issue116 pip install pytest==3.7.0 pytest tests/test_plugin.py::TestXDoctest::test_doctestmodule_external_and_issue116 This was working on pytest-3.6.4 It now fails on on pytest-3.7.0 Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() >>> self = TestXDoctest() >>> self.test_doctestmodule_external_and_issue116(testdir) """ p = testdir.mkpydir("hello_2") p.join("__init__.py").write(_pytest._code.Source(""" def somefunc(): ''' >>> i = 0 >>> i + 1 2 ''' """)) result = testdir.runpytest(p, "--xdoctest-modules", *EXTRA_ARGS) result.stdout.fnmatch_lines([ '*1 *>>> i = 0', '*2 *>>> i + 1', '**', '*Expected:', "* 2", "*Got:", "* 1", '**', "*:6: GotWantException", ]) def test_txtfile_failing(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_txtfile_failing -s """ p = testdir.maketxtfile(""" >>> i = 0 >>> i + 1 2 """) result = testdir.runpytest(p, "--xdoctest-modules", "-s", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines([ '*1 >>> i = 0', '*2 >>> i + 1', '**', 'Expected:', " 2", "Got:", " 1", '**', "*test_txtfile_failing.txt:3: GotWantException" ]) def test_txtfile_with_fixtures(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_txtfile_with_fixtures """ p = testdir.maketxtfile(""" >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' """) reprec = testdir.inline_run(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=1) def test_txtfile_with_usefixtures_in_ini(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_txtfile_with_usefixtures_in_ini """ testdir.makeini(""" [pytest] usefixtures = myfixture """) testdir.makeconftest(""" import pytest @pytest.fixture def myfixture(monkeypatch): monkeypatch.setenv("HELLO", "WORLD") """) p = testdir.maketxtfile(""" >>> import os >>> os.environ["HELLO"] 'WORLD' """) reprec = testdir.inline_run(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=1) def test_ignored_whitespace(self, testdir): testdir.makeini(""" [pytest] doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE """) p = testdir.makepyfile(""" class MyClass: ''' >>> a = "foo " >>> print(a) foo ''' pass """) reprec = testdir.inline_run(p, "--xdoctest-modules", *EXTRA_ARGS) reprec.assertoutcome(passed=1) def test_ignored_whitespace_glob(self, testdir): testdir.makeini(""" [pytest] doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE """) p = testdir.maketxtfile(xdoc=""" >>> a = "foo " >>> print(a) foo """) reprec = testdir.inline_run(p, "--xdoctest-glob=x*.txt", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=1) def test_contains_unicode(self, testdir): """Fix internal error with docstrings containing non-ascii characters. pytest tests/test_plugin.py -k test_contains_unicode """ testdir.makepyfile(u''' # encoding: utf-8 def foo(): """ >>> name = 'с' # not letter 'c' but instead Cyrillic 's'. 'anything' """ ''') result = testdir.runpytest('--xdoctest-modules', *EXTRA_ARGS) result.stdout.fnmatch_lines([ 'Got nothing', '* 1 failed*', ]) def test_junit_report_for_doctest(self, testdir): """ #713: Fix --junit-xml option when used with --xdoctest-modules. """ p = testdir.makepyfile(""" def foo(): ''' >>> 1 + 1 3 ''' pass """) reprec = testdir.inline_run(p, "--xdoctest-modules", "--junit-xml=junit.xml") reprec.assertoutcome(failed=1) def test_unicode_doctest_module(self, testdir): """ Test case for issue 2434: DecodeError on Python 2 when xdoctest docstring contains non-ascii characters. pytest -rsxX -p pytester tests/test_plugin.py::TestXDoctest::test_unicode_doctest_module """ p = testdir.makepyfile(test_unicode_doctest_module=""" def fix_bad_unicode(text): ''' >>> print(fix_bad_unicode('único')) único ''' return "único" """) result = testdir.runpytest(p, '--xdoctest-modules', *EXTRA_ARGS) result.stdout.fnmatch_lines(['* 1 passed*']) def test_xdoctest_multiline_list(self, testdir): """ pytest tests/test_plugin.py -k test_xdoctest_multiline_list """ p = testdir.maketxtfile(test_xdoctest_multiline_string=""" .. xdoctest:: >>> x = [1, 2, 3, >>> 4, 5, 6] >>> print(len(x)) 6 """) result = testdir.runpytest(p, "--xdoctest-modules", *EXTRA_ARGS) result.stdout.fnmatch_lines(['* 1 passed*']) def test_xdoctest_multiline_string(self, testdir): """ pytest -rsxX -p pytester tests/test_plugin.py::TestXDoctest::test_xdoctest_multiline_string """ import textwrap p = testdir.maketxtfile(test_xdoctest_multiline_string=textwrap.dedent( """ .. xdoctest:: # Old way >>> print(''' ... It would be nice if we didnt have to deal with prefixes ... in multiline strings. ... '''.strip()) It would be nice if we didnt have to deal with prefixes in multiline strings. # New way >>> print(''' Multiline can now be written without prefixes. Editing them is much more natural. '''.strip()) Multiline can now be written without prefixes. Editing them is much more natural. # This is ok too >>> print(''' >>> Just prefix everything with >>> and the xdoctest should work >>> '''.strip()) Just prefix everything with >>> and the xdoctest should work """).lstrip()) result = testdir.runpytest(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines(['* 1 passed*']) def test_xdoctest_trycatch(self, testdir): """ CommandLine: pytest -rsxX -p pytester tests/test_plugin.py::TestXDoctest::test_xdoctest_trycatch """ p = testdir.maketxtfile(test_xdoctest_multiline_string=""" .. xdoctest:: # Old way >>> try: ... print('foo') ... except Exception as ex: ... print('baz') ... else: ... print('bar') foo bar # New way >>> try: >>> print('foo') >>> except Exception as ex: >>> print('baz') >>> else: >>> print('bar') foo bar """) result = testdir.runpytest(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines(['* 1 passed*']) def test_xdoctest_functions(self, testdir): """ CommandLine: pytest -rsxX -p pytester tests/test_plugin.py::TestXDoctest::test_xdoctest_functions """ p = testdir.maketxtfile(test_xdoctest_multiline_string=""" .. xdoctest:: # Old way >>> def func(): ... print('before doctests were nice for the regex parser') >>> func() before doctests were nice for the regex parser # New way >>> def func(): >>> print('now the ast parser makes doctests nice for us') >>> func() now the ast parser makes doctests nice for us """) result = testdir.runpytest(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines(['* 1 passed*']) def test_stdout_capture_no(self, testdir): """ Test for xdoctest#3 pytest -rsxX -p pytester tests/test_plugin.py::TestXDoctest::test_stdout_capture_no Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() """ p = testdir.makepyfile(test_unicode_doctest_module=''' def foo(): """ Example: >>> foo() >>> print('in-doctest-print') """ print('in-func-print') ''') result = testdir.runpytest(p, '-s', '--xdoctest-modules', '--xdoctest-verbose=3', *EXTRA_ARGS) result.stdout.fnmatch_lines(['in-doctest-print']) result.stdout.fnmatch_lines(['in-func-print']) def test_stdout_capture_yes(self, testdir): """ Test for xdoctest#3 pytest -rsxX -p pytester tests/test_plugin.py::TestXDoctest::test_stdout_capture_yes """ p = testdir.makepyfile(test_unicode_doctest_module=''' def foo(): """ Example: >>> foo() >>> print('in-doctest-print') """ print('in-func-print') ''') result = testdir.runpytest(p, '--xdoctest-modules', *EXTRA_ARGS) assert all('in-doctest-print' not in line for line in result.stdout.lines) assert all('in-func-print' not in line for line in result.stdout.lines) class TestXDoctestModuleLevel: def test_doctestmodule(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestModuleLevel::test_doctestmodule Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() >>> self = TestXDoctest() """ p = testdir.makepyfile(""" ''' >>> x = 1 >>> x == 1 False ''' """) reprec = testdir.inline_run(p, "--xdoctest-modules") # print(reprec.stdout.str()) # print(reprec.listoutcomes()) reprec.assertoutcome(failed=1) def test_collect_module_single_modulelevel_doctest(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestModuleLevel::test_collect_module_single_modulelevel_doctest Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() >>> self = TestXDoctestModuleLevel() """ path = testdir.makepyfile(whatever='""">>> pass"""') for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--xdoc', *EXTRA_ARGS) assert len(items) == 1 assert isinstance(items[0], XDoctestItem) assert isinstance(items[0].parent, XDoctestModule) def test_collect_module_two_doctest_one_modulelevel(self, testdir): path = testdir.makepyfile(whatever=""" '>>> x = None' def my_func(): ">>> magic = 42 " """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--xdoc', *EXTRA_ARGS) assert len(items) == 2 assert isinstance(items[0], XDoctestItem) assert isinstance(items[1], XDoctestItem) assert isinstance(items[0].parent, XDoctestModule) assert items[0].parent is items[1].parent def test_collect_module_two_doctest_no_modulelevel(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestModuleLevel::test_collect_module_two_doctest_no_modulelevel Ignore: >>> import sys >>> sys.path.append('/home/joncrall/code/xdoctest/tests') >>> from test_plugin import * >>> testdir = explicit_testdir() >>> self = TestXDoctestModuleLevel() """ path = testdir.makepyfile(whatever=""" '# Empty' def my_func(): ">>> magic = 42 " def not_useful(): ''' # This is a function # >>> # it doesn't have any xdoctest ''' def another(): ''' # This is another function >>> import os # this one does have a xdoctest ''' """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--xdoc', *EXTRA_ARGS) print('reprec = {!r}'.format(reprec)) print('items = {!r}'.format(items)) assert len(items) == 2 assert isinstance(items[0], XDoctestItem) assert isinstance(items[1], XDoctestItem) assert isinstance(items[0].parent, XDoctestModule) assert items[0].parent is items[1].parent class TestLiterals: @pytest.mark.parametrize('config_mode', ['ini', 'comment']) @pytest.mark.skip('bytes are not supported yet') def test_allow_unicode(self, testdir, config_mode): """Test that doctests which output unicode work in all python versions tested by pytest when the ALLOW_UNICODE option is used (either in the ini file or by an inline comment). """ if config_mode == 'ini': testdir.makeini(''' [pytest] doctest_optionflags = ALLOW_UNICODE ''') comment = '' else: comment = '#xdoctest: +ALLOW_UNICODE' testdir.maketxtfile(test_doc=""" >>> b'12'.decode('ascii') {comment} '12' """.format(comment=comment)) testdir.makepyfile(foo=""" def foo(): ''' >>> b'12'.decode('ascii') {comment} '12' ''' """.format(comment=comment)) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=2) @pytest.mark.parametrize('config_mode', ['ini', 'comment']) @pytest.mark.skip('bytes are not supported yet') def test_allow_bytes(self, testdir, config_mode): """Test that doctests which output bytes work in all python versions tested by pytest when the ALLOW_BYTES option is used (either in the ini file or by an inline comment)(#1287). """ if config_mode == 'ini': testdir.makeini(''' [pytest] doctest_optionflags = ALLOW_BYTES ''') comment = '' else: comment = '#xdoctest: +ALLOW_BYTES' testdir.maketxtfile(test_doc=""" >>> b'foo' {comment} 'foo' """.format(comment=comment)) testdir.makepyfile(foo=""" def foo(): ''' >>> b'foo' {comment} 'foo' ''' """.format(comment=comment)) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=2) @pytest.mark.skip('bytes are not supported yet') def test_unicode_string(self, testdir): """Test that doctests which output unicode fail in Python 2 when the ALLOW_UNICODE option is not used. The same test should pass in Python 3. """ testdir.maketxtfile(test_doc=""" >>> b'12'.decode('ascii') '12' """) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) passed = int(sys.version_info[0] >= 3) reprec.assertoutcome(passed=passed, failed=int(not passed)) @pytest.mark.skip('bytes are not supported yet') def test_bytes_literal(self, testdir): """Test that doctests which output bytes fail in Python 3 when the ALLOW_BYTES option is not used. The same test should pass in Python 2 (#1287). """ testdir.maketxtfile(test_doc=""" >>> b'foo' 'foo' """) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) passed = int(sys.version_info[0] == 2) reprec.assertoutcome(passed=passed, failed=int(not passed)) class TestXDoctestSkips: """ If all examples in a xdoctest are skipped due to the SKIP option, then the tests should be SKIPPED rather than PASSED. (#957) CommandLine pytest tests/test_plugin.py::TestXDoctestSkips """ def test_xdoctest_skips_diabled(self, testdir): testdir.makepyfile(foo=""" import sys def foo(): ''' DisableDoctest: >>> True True ''' """) result = testdir.runpytest("--xdoctest-modules", *EXTRA_ARGS) if True: pass else: result.stdout.fnmatch_lines(['*no tests ran*']) @pytest.fixture(params=['text', 'module']) def makedoctest(self, testdir, request): def makeit(xdoctest): mode = request.param if mode == 'text': testdir.maketxtfile(xdoctest) else: assert mode == 'module' testdir.makepyfile('"""\n%s"""' % xdoctest) return makeit def test_one_skipped_passed(self, testdir, makedoctest): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestSkips::test_one_skipped_passed """ makedoctest(""" >>> 1 + 1 # xdoctest: +SKIP 4 >>> 2 + 2 4 """) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=1) def test_one_skipped_failed(self, testdir, makedoctest): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestSkips::test_one_skipped_failed """ makedoctest(""" >>> 1 + 1 # xdoctest: +SKIP 4 >>> 2 + 2 200 """) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(failed=1) def test_all_skipped(self, testdir, makedoctest): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestSkips::test_all_skipped """ makedoctest(""" >>> 1 + 1 # xdoctest: +SKIP 2 >>> 2 + 2 # xdoctest: +SKIP 200 """) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) # In xdoctest blocks are considered as a whole, so skipped lines do not # count towards completely skipped doctests unless nothing was run, as # is the case here. reprec.assertoutcome(passed=0, skipped=1) def test_all_skipped_global(self, testdir, makedoctest): """ CommandLine: pytest tests/test_plugin.py::TestXDoctestSkips::test_all_skipped_global """ # Test new global directive added in xdoctest makedoctest(""" >>> # xdoctest: +SKIP >>> 1 + 1 2 >>> 2 + 2 200 """) reprec = testdir.inline_run("--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=0, skipped=1) def test_vacuous_all_skipped(self, testdir, makedoctest): makedoctest('') reprec = testdir.inline_run("--xdoctest-modules", *EXTRA_ARGS) reprec.assertoutcome(passed=0, skipped=0) class TestXDoctestAutoUseFixtures: SCOPES = ['module', 'session', 'class', 'function'] def test_doctest_module_session_fixture(self, testdir): """ Test that session fixtures are initialized for xdoctest modules (#768) pytest tests/test_plugin.py -k test_doctest_module_session_fixture """ # session fixture which changes some global data, which will # be accessed by doctests in a module testdir.makeconftest( """ import pytest import sys @pytest.fixture(autouse=True, scope='session') def myfixture(): assert not hasattr(sys, 'pytest_session_data') sys.pytest_session_data = 1 yield del sys.pytest_session_data """ ) testdir.makepyfile( foo=""" import sys def foo(): ''' >>> assert sys.pytest_session_data == 1 ''' def bar(): ''' >>> assert sys.pytest_session_data == 1 ''' """ ) result = testdir.runpytest("--xdoctest-modules", "-s") result.stdout.fnmatch_lines(["*2 passed*"]) @pytest.mark.parametrize('scope', SCOPES) @pytest.mark.parametrize('enable_doctest', [True, False]) def test_fixture_scopes(self, testdir, scope, enable_doctest): """Test that auto-use fixtures work properly with xdoctest modules. See #1057 and #1100. pytest tests/test_plugin.py -k test_fixture_scopes """ testdir.makeconftest(''' import pytest @pytest.fixture(autouse=True, scope="{scope}") def auto(request): return 99 '''.format(scope=scope)) testdir.makepyfile(test_1=''' def test_foo(): """ >>> getfixture('auto') + 1 100 """ def test_bar(): assert 1 ''') params = ('--xdoctest-modules',) if enable_doctest else () passes = 3 if enable_doctest else 2 result = testdir.runpytest(*params) result.stdout.fnmatch_lines(['* %d passed*' % passes]) @pytest.mark.parametrize('scope', SCOPES) @pytest.mark.parametrize('autouse', [True, False]) @pytest.mark.parametrize('use_fixture_in_doctest', [True, False]) def test_fixture_module_doctest_scopes(self, testdir, scope, autouse, use_fixture_in_doctest): """Test that auto-use fixtures work properly with xdoctest files. See #1057 and #1100. pytest tests/test_plugin.py -k test_fixture_module_doctest_scopes """ testdir.makeconftest(''' import pytest @pytest.fixture(autouse={autouse}, scope="{scope}") def auto(request): return 99 '''.format(scope=scope, autouse=autouse)) if use_fixture_in_doctest: testdir.maketxtfile(test_doc=""" >>> getfixture('auto') 99 """) else: testdir.maketxtfile(test_doc=""" >>> 1 + 1 2 """) result = testdir.runpytest('--xdoctest-modules', *(EXTRA_ARGS + OLD_TEXT_ARGS)) assert 'FAILURES' not in str(result.stdout.str()) result.stdout.fnmatch_lines(['* 1 passed*']) @pytest.mark.parametrize('scope', SCOPES) def test_auto_use_request_attributes(self, testdir, scope): """Check that all attributes of a request in an autouse fixture behave as expected when requested for a xdoctest item. """ testdir.makeconftest(''' import pytest @pytest.fixture(autouse=True, scope="{scope}") def auto(request): if "{scope}" == 'module': assert request.module is None if "{scope}" == 'class': assert request.cls is None if "{scope}" == 'function': assert request.function is None return 99 '''.format(scope=scope)) testdir.maketxtfile(test_doc=""" >>> 1 + 1 2 """) result = testdir.runpytest('--xdoctest-modules', *(EXTRA_ARGS + OLD_TEXT_ARGS)) assert 'FAILURES' not in str(result.stdout.str()) result.stdout.fnmatch_lines(['* 1 passed*']) @pytest.mark.skip class TestXDoctestNamespaceFixture: """ Not sure why these tests wont work FIXME: These dont work because xdoctest does not support running with fixtures yet. pytest tests/test_plugin.py::TestXDoctestNamespaceFixture """ SCOPES = ['module', 'session', 'class', 'function'] @pytest.mark.parametrize('scope', SCOPES) def test_namespace_doctestfile(self, testdir, scope): """ Check that inserting something into the namespace works in a simple text file xdoctest """ testdir.makeconftest(""" import pytest import contextlib @pytest.fixture(autouse=True, scope="{scope}") def add_contextlib(doctest_namespace): doctest_namespace['cl'] = contextlib """.format(scope=scope)) p = testdir.maketxtfile(""" >>> print(cl.__name__) contextlib """) reprec = testdir.inline_run(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(passed=1) @pytest.mark.parametrize('scope', SCOPES) def test_namespace_pyfile(self, testdir, scope): """ Check that inserting something into the namespace works in a simple Python file docstring xdoctest pytest tests/test_plugin.py::TestXDoctestNamespaceFixture::test_namespace_pyfile """ testdir.makeconftest(""" import pytest import contextlib @pytest.fixture(autouse=True, scope="{scope}") def add_contextlib(doctest_namespace): doctest_namespace['cl'] = contextlib """.format(scope=scope)) p = testdir.makepyfile(""" def foo(): ''' >>> print(cl.__name__) contextlib ''' """) reprec = testdir.inline_run(p, "--xdoctest-modules", *EXTRA_ARGS) reprec.assertoutcome(passed=1) class TestXDoctestReportingOption: def _run_doctest_report(self, testdir, format): testdir.makepyfile(""" def foo(): ''' >>> # xdoc: -NORMALIZE_WHITESPACE >>> foo() a b 0 1 4 1 2 4 2 3 6 ''' print(' a b\\n' '0 1 4\\n' '1 2 5\\n' '2 3 6') """) return testdir.runpytest("--xdoctest-modules", "--xdoctest-report", format, *EXTRA_ARGS) @pytest.mark.parametrize('format', ['udiff', 'UDIFF', 'uDiFf']) def test_doctest_report_udiff(self, testdir, format): """ pytest tests/test_plugin.py::TestXDoctestReportingOption::test_doctest_report_udiff """ result = self._run_doctest_report(testdir, format) result.stdout.fnmatch_lines([ ' 0 1 4', ' -1 2 4', ' +1 2 5', ' 2 3 6', ]) def test_doctest_report_cdiff(self, testdir): """ pytest tests/test_plugin.py::TestXDoctestReportingOption::test_doctest_report_cdiff """ result = self._run_doctest_report(testdir, 'cdiff') result.stdout.fnmatch_lines([ ' a b', ' 0 1 4', ' ! 1 2 4', ' 2 3 6', ' --- 1,4 ----', ' a b', ' 0 1 4', ' ! 1 2 5', ' 2 3 6', ]) def test_doctest_report_ndiff(self, testdir): """ pytest tests/test_plugin.py::TestXDoctestReportingOption::test_doctest_report_ndiff """ result = self._run_doctest_report(testdir, 'ndiff') result.stdout.fnmatch_lines([ ' a b', ' 0 1 4', ' - 1 2 4', ' ? ^', ' + 1 2 5', ' ? ^', ' 2 3 6', ]) @pytest.mark.parametrize('format', ['none', 'only_first_failure']) def test_doctest_report_none_or_only_first_failure(self, testdir, format): """ pytest tests/test_plugin.py::TestXDoctestReportingOption::test_doctest_report_none_or_only_first_failure """ result = self._run_doctest_report(testdir, format) result.stdout.fnmatch_lines([ 'Expected:', ' a b', ' 0 1 4', ' 1 2 4', ' 2 3 6', 'Got:', ' a b', ' 0 1 4', ' 1 2 5', ' 2 3 6', ]) def test_doctest_report_invalid(self, testdir): """ pytest tests/test_plugin.py::TestXDoctestReportingOption::test_doctest_report_invalid """ result = self._run_doctest_report(testdir, 'obviously_invalid_format') result.stderr.fnmatch_lines([ "*error: argument --xdoctest-report/--xdoc-report: invalid choice: 'obviously_invalid_format' (choose from*" ]) class Disabled: def test_docstring_context_around_error(self, testdir): """Test that we show some context before the actual line of a failing xdoctest. # XDOCTEST DOES NOT SHOW NON-SOURCE CONTEXT CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_docstring_context_around_error """ testdir.makepyfile(''' def foo(): """ text-line-1 text-line-2 text-line-3 text-line-4 text-line-5 text-line-6 text-line-7 text-line-8 text-line-9 text-line-10 text-line-11 >>> 1 + 1 3 text-line-after """ ''') result = testdir.runpytest('--xdoctest-modules', *EXTRA_ARGS) result.stdout.fnmatch_lines([ '*docstring_context_around_error*', '005*text-line-3', '006*text-line-4', '013*text-line-11', '014*>>> 1 + 1', 'Expected:', ' 3', 'Got:', ' 2', ]) # lines below should be trimmed out assert 'text-line-2' not in result.stdout.str() assert 'text-line-after' not in result.stdout.str() def test_doctest_linedata_missing(self, testdir): """ REPLACES: test_doctest_linedata_missing REASON: Static parsing means we do know this line number. CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctest_linedata_missing """ testdir.tmpdir.join('hello.py').write(_pytest._code.Source(""" class Fun: @property def test(self): ''' >>> a = 1 >>> 1/0 ''' """)) result = testdir.runpytest("--xdoctest-modules", *EXTRA_ARGS) result.stdout.fnmatch_lines([ "*hello*", "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*", "*1/0*", "*REASON*ZeroDivision*", "*1 failed*", ]) def test_doctestmodule_with_fixtures(self, testdir): p = testdir.makepyfile(""" ''' >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' ''' """) reprec = testdir.inline_run(p, "--xdoctest-modules") reprec.assertoutcome(passed=1) def test_doctestmodule_three_tests(self, testdir): p = testdir.makepyfile(""" ''' >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' ''' def my_func(): ''' >>> magic = 42 >>> magic - 42 0 ''' def not_useful(): pass def another(): ''' >>> import os >>> os is os True ''' """) reprec = testdir.inline_run(p, "--xdoctest-modules") reprec.assertoutcome(passed=3) def test_doctestmodule_two_tests_one_fail(self, testdir): """ CommandLine: pytest tests/test_plugin.py::TestXDoctest::test_doctestmodule_two_tests_one_fail """ p = testdir.makepyfile(""" class MyClass: def bad_meth(self): ''' >>> magic = 42 >>> magic 0 ''' def nice_meth(self): ''' >>> magic = 42 >>> magic - 42 0 ''' """) reprec = testdir.inline_run(p, "--xdoctest-modules") reprec.assertoutcome(failed=1, passed=1) def test_non_ignored_whitespace(self, testdir): testdir.makeini(""" [pytest] doctest_optionflags = ELLIPSIS """) p = testdir.makepyfile(""" class MyClass: ''' >>> a = "foo " >>> print(a) foo ''' pass """) reprec = testdir.inline_run(p, "--xdoctest-modules") reprec.assertoutcome(failed=1, passed=0) def test_non_ignored_whitespace_glob(self, testdir): testdir.makeini(""" [pytest] doctest_optionflags = ELLIPSIS """) p = testdir.maketxtfile(xdoc=""" >>> a = "foo " >>> print(a) foo """) reprec = testdir.inline_run(p, "--xdoctest-glob=x*.txt", *(EXTRA_ARGS + OLD_TEXT_ARGS)) reprec.assertoutcome(failed=1, passed=0) def test_ignore_import_errors_on_doctest(self, testdir): p = testdir.makepyfile(""" import asdf def add_one(x): ''' >>> add_one(1) 2 ''' return x + 1 """) reprec = testdir.inline_run(p, "--xdoctest-modules", "--xdoctest-ignore-import-errors") reprec.assertoutcome(skipped=1, failed=1, passed=0) def test_unicode_doctest(self, testdir): """ Test case for issue 2434: DecodeError on Python 2 when xdoctest contains non-ascii characters. """ p = testdir.maketxtfile(test_unicode_doctest=""" .. xdoctest:: >>> print( ... "Hi\\n\\nByé") Hi ... Byé >>> 1/0 # Byé 1 """) result = testdir.runpytest(p, "--xdoctest-modules", *(EXTRA_ARGS + OLD_TEXT_ARGS)) result.stdout.fnmatch_lines([ '* REASON: ZeroDivisionError*', '*1 failed*', ]) def test_reportinfo(self, testdir): ''' Test case to make sure that XDoctestItem.reportinfo() returns lineno. ''' p = testdir.makepyfile(test_reportinfo=""" def foo(x): ''' >>> foo('a') 'b' ''' return 'c' """) items, reprec = testdir.inline_genitems(p, '--xdoctest-modules') reportinfo = items[0].reportinfo() assert reportinfo[1] == 1 Erotemic-xdoctest-fac8308/tests/test_preimport.py000066400000000000000000000023131505122333300223220ustar00rootroot00000000000000from os.path import join from xdoctest import utils def test_preimport_skiped_on_disabled_module(): """ If our module has no enabled tests, pre-import should never run. """ from xdoctest import runner import os source = utils.codeblock( ''' raise Exception("DONT IMPORT ME!") def ima_function(): """ Example: >>> # xdoctest: +REQUIRES(env:XDOCTEST_TEST_DOITANYWAY) >>> print('hello') """ ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_bad_preimport.py') with open(modpath, 'w') as file: file.write(source) os.environ['XDOCTEST_TEST_DOITANYWAY'] = '' with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'all', argv=['']) assert 'Failed to import modname' not in cap.text assert '1 skipped' in cap.text os.environ['XDOCTEST_TEST_DOITANYWAY'] = '1' with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'all', argv=[]) assert 'Failed to import modname' in cap.text del os.environ['XDOCTEST_TEST_DOITANYWAY'] Erotemic-xdoctest-fac8308/tests/test_pytest_cli.py000066400000000000000000000153611505122333300224670ustar00rootroot00000000000000from xdoctest.utils import util_misc import sys from xdoctest import utils def cmd(command): # simplified version of ub.cmd no fancy tee behavior import subprocess proc = subprocess.Popen( command, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = proc.communicate() ret = proc.wait() info = { 'proc': proc, 'out': out, 'err': err, 'ret': ret, } return info def test_simple_pytest_cli(): module_text = utils.codeblock( ''' def module_func1(): """ This module has a doctest Example: >>> print('hello world') """ ''') temp_module = util_misc.TempModule(module_text) temp_module.print_contents() modpath = temp_module.modpath info = cmd(sys.executable + ' -m pytest --xdoctest ' + modpath) print('COMMAND OUT:') print(info['out']) print('COMMAND ERR:') print(info['err']) print('COMMAND RET:') print(info['ret']) assert info['ret'] == 0 def test_simple_pytest_import_error_cli(): """ This test case triggers an excessively long callback in xdoctest < dev/0.15.7 CommandLine: xdoctest ~/code/xdoctest/tests/test_pytest_cli.py test_simple_pytest_import_error_cli pytest ~/code/xdoctest/tests/test_pytest_cli.py -k test_simple_pytest_import_error_cli -s """ module_text = utils.codeblock( ''' # There are lines before the bad line import os import sys import does_not_exist def module_func1(): """ This module has a doctest Example: >>> print('hello world') """ ''') temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod') temp_module.print_contents() if sys.platform.startswith('win'): info = cmd('(dir 2>&1 *`|echo CMD);&<# rem #>echo PowerShell') print(f'info={info}') print(info['out']) info = cmd(f'dir {temp_module.dpath}') print(f'info={info}') print(info['out']) info = cmd(f'{sys.executable} {temp_module.modpath}') print(f'info={info}') print(info['out']) info = cmd(f'{sys.executable} "{temp_module.modpath}"') print(f'info={info}') print(info['out']) command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 --xdoctest-supress-import-errors --xdoctest ' + temp_module.dpath print('-- PRINT COMMAND 1:') print(command) print('-- RUN COMMAND 1:') info = cmd(command) print('-- COMMAND OUTPUT 1:') print(info['out']) # We patched doctest_example so it no longer outputs this in the traceback assert 'util_import' not in info['out'] print('-- COMMAND RETURN CODE 1:') print(info['ret']) # Note: flaky changes the return code from 1 to 3, so test non-zero assert info['ret'] != 0 # Remove the suppress import error flag and now we should get the traceback temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod') command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 --xdoctest ' + temp_module.dpath print('-- PRINT COMMAND 2:') print(command) print('-- RUN COMMAND 2:') info = cmd(command) print('-- COMMAND OUTPUT 2:') print(info['out']) # We patched doctest_example so it no longer outputs this in the traceback assert 'util_import' in info['out'] print('-- COMMAND RETURN CODE 2:') print(info['ret']) # Note: flaky changes the return code from 1 to 3, so test non-zero assert info['ret'] != 0 def test_simple_pytest_syntax_error_cli(): """ """ module_text = utils.codeblock( ''' &&does_not_exist def module_func1(): """ This module has a doctest Example: >>> print('hello world') """ ''') temp_module = util_misc.TempModule(module_text) info = cmd(sys.executable + ' -m pytest --xdoctest ' + temp_module.dpath) print(info['out']) assert info['ret'] != 0 info = cmd(sys.executable + ' -m pytest --xdoctest ' + temp_module.modpath) print(info['out']) assert info['ret'] != 0 def test_simple_pytest_import_error_no_xdoctest(): """ """ module_text = utils.codeblock( ''' import does_not_exist def test_this(): print('hello world') ''') temp_module = util_misc.TempModule(module_text) info = cmd(sys.executable + ' -m pytest ' + temp_module.modpath) print(info['out']) assert info['ret'] != 0 info = cmd(sys.executable + ' -m pytest ' + temp_module.dpath) print(info['out']) assert info['ret'] != 0 def test_simple_pytest_syntax_error_no_xdoctest(): """ """ module_text = utils.codeblock( ''' &&does_not_exist def test_this(): print('hello world') ''') temp_module = util_misc.TempModule(module_text) info = cmd(sys.executable + ' -m pytest ' + temp_module.modpath) print(info['out']) assert info['ret'] != 0 info = cmd(sys.executable + ' -m pytest ' + temp_module.dpath) print(info['out']) assert info['ret'] != 0 def test_version_and_cli_info(): """ """ import xdoctest info = cmd(sys.executable + ' -m xdoctest --version') assert info['out'].strip() == xdoctest.__version__ info = cmd(sys.executable + ' -m xdoctest --version-info') assert xdoctest.__version__ in info['out'] def test_simple_xdoctest_cli(): module_text = utils.codeblock( ''' def module_func1(): """ This module has a doctest Example: >>> print('hello world') """ ''') temp_module = util_misc.TempModule(module_text) modpath = temp_module.modpath info = cmd(sys.executable + ' -m xdoctest ' + modpath + ' --time') assert 'time:' in info['out'] info = cmd(sys.executable + ' -m xdoctest ' + modpath + ' all') assert 'passed' in info['out'] info = cmd(sys.executable + ' -m xdoctest ' + modpath + ' list') assert 'passed' not in info['out'] info = cmd(sys.executable + ' -m xdoctest ' + modpath + ' --verbose=0') print(repr(info['out'])) assert info['out'].strip() == '' def test_simple_xdoctest_cli_errors(): module_text = utils.codeblock( ''' def module_func1(): """ This module has a doctest Example: >>> raise Exception """ ''') temp_module = util_misc.TempModule(module_text) modpath = temp_module.modpath info = cmd(sys.executable + ' -m xdoctest ' + modpath + ' --time') assert '1 failed' in info['out'] Erotemic-xdoctest-fac8308/tests/test_runner.py000066400000000000000000000302411505122333300216130ustar00rootroot00000000000000from os.path import join from xdoctest import utils def test_zero_args(): """ python tests/test_runner.py test_zero_args """ from xdoctest import runner source = utils.codeblock( ''' # --- HELPERS --- def zero_args1(a=1): pass def zero_args2(*args): pass def zero_args3(**kwargs): pass def zero_args4(a=1, b=2, *args, **kwargs): pass def non_zero_args1(a): pass def non_zero_args2(a, b): pass def non_zero_args3(a, b, *args): pass def non_zero_args4(a, b, **kwargs): pass def non_zero_args5(a, b=1, **kwargs): pass ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_zero_args.py') with open(modpath, 'w') as file: file.write(source) zero_func_names = { example.callname for example in runner._gather_zero_arg_examples(modpath) } assert zero_func_names == set(['zero_args1', 'zero_args2', 'zero_args3', 'zero_args4']) def test_list(): from xdoctest import runner source = utils.codeblock( ''' # --- HELPERS --- def real_test1(a=1): """ Example: >>> pass """ pass def fake_test1(a=1): pass def real_test2(): """ Example: >>> pass """ pass def fake_test2(): pass ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_list.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'list', argv=['']) assert 'real_test1' in cap.text assert 'real_test2' in cap.text assert 'fake_test1' not in cap.text assert 'fake_test2' not in cap.text # test command=None with utils.CaptureStdout() as cap: runner.doctest_module(modpath, None, argv=['']) assert 'real_test1' in cap.text assert 'real_test2' in cap.text assert 'fake_test1' not in cap.text assert 'fake_test2' not in cap.text def test_example_run(): from xdoctest import runner source = utils.codeblock( ''' def foo(): """ Example: >>> print('i wanna see this') """ ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_example_run.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'foo', argv=['']) assert 'i wanna see this' in cap.text def test_durations(): from xdoctest import runner source = utils.codeblock( ''' def func1(): """ Example: >>> print(1) """ def func2(): """ Example: >>> print(123) """ ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_durations.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap1: runner.doctest_module(modpath, 'all', argv=[''], durations=10) with utils.CaptureStdout() as cap2: runner.doctest_module(modpath, 'all', argv=[''], durations=1) assert cap1.text.count('time: ') == 2, '2 tests should have 2 durations' assert cap2.text.count('time: ') == 1, 'should only have gotten 1 durration' def test_dump(): from xdoctest import runner source = utils.codeblock( ''' def func1(): """ Example: >>> these = 'tests will be converted to unit tests' >>> print(these + ' because sometimes you wanna') """ def func2(): """ Example: >>> for i in range(10): >>> these = 'and sometimes your doctests should have been ' >>> for j in range(10): ... print(these + ' unit tests all along') """ ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_durations.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'dump', argv=['']) print(cap.text) def test_all_disabled(): """ pytest tests/test_runner.py::test_all_disabled -s -vv python tests/test_runner.py test_all_disabled """ from xdoctest import runner source = utils.codeblock( ''' def foo(): """ Example: >>> # DISABLE_DOCTEST >>> print('all will' + ' not print this') """ def bar(): """ Example: >>> print('all will' + ' print this') """ ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_all_disabled.py') with open(modpath, 'w') as file: file.write(source) # disabled tests dont run in "all" mode with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'all', argv=['']) assert 'all will print this' in cap.text # print(' ' + cap.text.replace('\n', '\n ')) assert 'all will not print this' not in cap.text # Running an disabled example explicitly should work with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'foo', argv=['']) # print(' ' + cap.text.replace('\n', '\n ')) assert 'all will not print this' in cap.text def test_runner_failures(): """ python tests/test_runner.py test_runner_failures pytest tests/test_runner.py::test_runner_failures -s pytest tests/test_runner.py::test_all_disabled -s """ from xdoctest import runner source = utils.codeblock( ''' def test1(): """ Example: >>> pass """ def test2(): """ Example: >>> assert False, 'test 2.1' Example: >>> assert False, 'test 2.2' """ def test3(): """ Example: >>> pass Example: >>> pass """ def test4(): """ Example: >>> assert False, 'test 3' """ ''') temp = utils.TempDir() temp.ensure() # with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_runner_failures.py') with open(modpath, 'w') as file: file.write(source) # disabled tests dont run in "all" mode with utils.CaptureStdout(suppress=True) as cap: try: runner.doctest_module(modpath, 'all', argv=[''], verbose=1) except Exception: pass print('\nNOTE: the following output is part of a test') print(utils.indent(cap.text, '... ')) print('NOTE: above output is part of a test') # assert '.FFF' in cap.text assert '3 / 6 passed' in cap.text assert '3 failed' in cap.text assert '3 passed' in cap.text def test_run_zero_arg(): """ pytest tests/test_runner.py::test_run_zero_arg -s """ from xdoctest import runner source = utils.codeblock( ''' def zero_arg_print(): print('running zero arg') ''') with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_run_zero_arg.py') with open(modpath, 'w') as file: file.write(source) # disabled tests dont run in "all" mode with utils.CaptureStdout() as cap: try: runner.doctest_module(modpath, 'all', argv=[''], verbose=3) except Exception: pass assert 'running zero arg' not in cap.text with utils.CaptureStdout() as cap: try: runner.doctest_module(modpath, 'zero_arg_print', argv=[''], verbose=3) except Exception: pass # print(cap.text) assert 'running zero arg' in cap.text def test_parse_cmdline(): """ pytest tests/test_runner.py::test_parse_cmdline -s """ from xdoctest import runner # sys.argv could be anything, so just run this for coverage to make sure it doesnt crash runner._parse_commandline(command=None, style=None, verbose=None, argv=None) # check specifying argv changes style assert 'freeform' == runner._parse_commandline(command=None, style=None, verbose=None, argv=['--freeform'])[1] assert 'google' == runner._parse_commandline(command=None, style=None, verbose=None, argv=['--google'])[1] assert None is runner._parse_commandline(command=None, style=None, verbose=None, argv=['--google'])[0] def test_runner_config(): """ pytest tests/test_runner.py::test_runner_config -s """ from xdoctest import runner source = utils.codeblock( ''' def foo(): """ Example: >>> print('i wanna see this') """ ''') config = { 'default_runtime_state': {'SKIP': True}, } with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_example_run.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'foo', argv=[''], config=config) assert 'SKIPPED' in cap.text def test_global_exec(): """ pytest tests/test_runner.py::test_global_exec -s """ from xdoctest import runner source = utils.codeblock( ''' def foo(): """ Example: >>> print(a) """ ''') config = { 'global_exec': 'a=1', } with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_example_run.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'foo', argv=[''], config=config) assert '1 passed' in cap.text def test_hack_the_sys_argv(): """ Tests hacky solution to issue #76 NOTE: in version 1.0.2 this hack no longer works! pytest tests/test_runner.py::test_hack_the_sys_argv -s References: https://github.com/Erotemic/xdoctest/issues/76 """ from xdoctest import runner source = utils.codeblock( ''' def foo(): """ Example: >>> # xdoctest: +REQUIRES(--hackedflag) >>> print('This will run if global_exec specified') """ ''') import sys NEEDS_FIX = '--hackedflag' not in sys.argv config = { 'global_exec': 'import sys; sys.argv.append("--hackedflag")' } with utils.TempDir() as temp: dpath = temp.dpath modpath = join(dpath, 'test_example_run.py') with open(modpath, 'w') as file: file.write(source) with utils.CaptureStdout() as cap: runner.doctest_module(modpath, 'foo', argv=[''], config=config) if 0 and NEEDS_FIX: # Fix the global state sys.argv.remove('--hackedflag') # print(cap.text) assert '1 skipped' in cap.text # assert '1 passed' in cap.text if __name__ == '__main__': """ CommandLine: pytest tests/test_runner.py -s pytest tests/test_runner.py -s python tests/test_runner.py test_zero_args """ # import pytest # pytest.main([__file__]) import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_static.py000066400000000000000000000066101505122333300215740ustar00rootroot00000000000000from xdoctest import static_analysis as static from xdoctest import utils def test_module_docstr(): source = utils.codeblock( ''' # comment """ module level docstr """ def foo(): """ other docstr """ ''') self = static.TopLevelVisitor.parse(source) assert '__doc__' in self.calldefs def test_lineno(): source = utils.codeblock( ''' def foo(): """ multiline 0-1-0 """ def subfunc(): pass def bar(): """ multiline 1-1-1 """ pass def baz(): """ multiline 0-1-1 """ def biz(): """ multiline 1-1-0 """ class Spam: """ multiline 0-2-1 --- """ def eggs(): """ multiline 0-2-0 ---""" pass ''') self = static.TopLevelVisitor.parse(source) calldefs = self.calldefs sourcelines = source.split('\n') for k, calldef in calldefs.items(): line = sourcelines[calldef.lineno - 1] callname = calldef.callname # Ensure linenumbers correspond with start of func/class def assert callname.split('.')[-1] in line docsrc_lines = sourcelines[calldef.doclineno - 1:calldef.doclineno_end] # Ensure linenumbers correspond with start and end of doctest assert docsrc_lines[0].strip().startswith('"""') assert docsrc_lines[-1].strip().endswith('"""') def test_mod_lineno2(): source = utils.codeblock( ''' class Fun: #1 @property def test(self): """ # 4 >>> a = 1 >>> 1 / 0 """ def nodec1(self): # 9 pass def nodec2(self, # 12 x=y): """ # 14 >>> d = 1 """ # 16 @decor # 18 def decor1(self): # 19 pass @decor() def decor2(self): pass @decor( foo=bar ) def decor3(self): # 29 """ >>> d = 3 """ @decor( foo=bar # 35 ) # 36 def decor4(self): # 37 ">>> print(1)" # 38 ''') # import ast from xdoctest.static_analysis import TopLevelVisitor # source_utf8 = source.encode('utf8') # pt = ast.parse(source_utf8) # node = pt.body[0].body[0] self = TopLevelVisitor.parse(source) calldefs = self.calldefs assert calldefs['Fun'].lineno == 1 assert calldefs['Fun.test'].lineno == 3 assert calldefs['Fun.test'].doclineno == 4 assert calldefs['Fun.test'].doclineno_end == 7 assert calldefs['nodec1'].doclineno is None assert calldefs['nodec2'].doclineno == 14 assert calldefs['nodec2'].doclineno_end == 16 assert calldefs['decor3'].doclineno == 30 assert calldefs['decor3'].doclineno_end == 32 assert calldefs['decor4'].doclineno == 38 assert calldefs['decor4'].doclineno_end == 38 if __name__ == '__main__': """ CommandLine: python -B %HOME%/code/xdoctest/tests/test_static.py all pytest ~/code/xdoctest/tests/test_static.py """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tests/test_traceback.py000066400000000000000000000150531505122333300222250ustar00rootroot00000000000000""" Need to enhance the tracebacks to spit out something more useful """ from xdoctest import utils from xdoctest.utils.util_misc import _run_case def test_fail_call_onefunc(): import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") text = _run_case(utils.codeblock( ''' def func(a): """ Example: >>> a = 1 >>> func(a) """ a = []() return a ''')) assert '>>> func(a)' in text assert 'rel: 2, abs: 5' in text def test_fail_call_twofunc(): """ python ~/code/xdoctest/tests/test_traceback.py test_fail_call_twofunc """ import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") text = _run_case(utils.codeblock( ''' def func(a): """ Example: >>> a = 1 >>> func(a) """ a = []() return a def func2(a): """ Example: >>> pass """ pass ''')) assert text assert '>>> func(a)' in text assert 'rel: 2, abs: 5,' in text def test_fail_inside_twofunc(): """ python ~/code/xdoctest/tests/test_traceback.py test_fail_inside_twofunc """ import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") text = _run_case(utils.codeblock( ''' def func(a): """ Example: >>> print('not failed') >>> # just a comment >>> print(("foo" ... "bar")) >>> a = []() >>> func(a) """ return a def func2(a): """ Example: >>> pass """ pass ''')) assert text assert '>>> a = []()' in text assert 'rel: 5, abs: 8' in text def test_fail_inside_onefunc(): """ python ~/code/xdoctest/tests/test_traceback.py test_fail_inside_onefunc """ text = _run_case(utils.codeblock( ''' def func(a): """ Example: >>> x = 1 >>> # just a comment >>> print(("foo" ... "bar")) foobar >>> a = []() >>> func(a) """ return a ''')) assert text assert '>>> a = []()' in text assert 'rel: 6, abs: 9,' in text def test_failure_linenos(): """ Example: python ~/code/xdoctest/tests/test_linenos.py test_failure_linenos Example: >>> test_failure_linenos() """ text = _run_case(utils.codeblock( r''' def bar(a): return a class Foo: @bar @staticmethod def func(a): """ Example: >>> # Perform some passing tests before we call failing code >>> Foo.func(0) 0 >>> # call the failing code >>> if True: >>> assert 1 == 2 >>> # Do stuff that wont be executed >>> Foo.func(0) 0 >>> Foo.func(1) 1 """ return a ''')) assert 'line 15' in text assert 'line 6' in text assert text # There are three different types of traceback failure # (1) failure of code within the doctest # (2) failure of code called by the doctest # (3) failure of doctest got/want syntax # TODO: Add checks on the line numbers reported in the tracebacks for these # function. # TODO: Check that the formatting of the tracebacks for each case are user # friendly """ SeeAlso: # This plugin tests also checks line numbers. Make sure we dont break it pytest tests/test_plugin.py::TestXDoctest::test_doctest_property_lineno -v -s """ def test_lineno_failcase_gotwant(): """ python ~/code/xdoctest/tests/test_linenos.py test_lineno_failcase_gotwant """ text = _run_case(utils.codeblock( ''' def func(a): """ Example: >>> got = func('foo') >>> print(got) bar """ return a ''')) assert text assert 'line 3' in text assert 'line 6' in text def test_lineno_failcase_called_code(): """ python ~/code/xdoctest/tests/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/tests/test_linenos.py """ text = _run_case(utils.codeblock( r''' def func(a): """ Example: >>> func(0) >>> # this doesnt do anything >>> print('this passes') this passes >>> # call the failing code >>> func(3) """ if a > 0: nested_failure(a) return a def nested_failure(a): if a > 0: nested_failure(a - 1) else: raise Exception('fail case') ''')) assert 'rel: 6, abs: 9,' in text assert text def test_lineno_failcase_doctest_code(): """ python ~/code/xdoctest/tests/test_linenos.py test_lineno_failcase_doctest_code """ text = _run_case(utils.codeblock( r''' def bar(): pass def func(a): """ Example: >>> # Perform some passing tests before we call failing code >>> func(0) 0 >>> # call the failing code >>> assert 1 == 2 >>> # Do stuff that wont be executed >>> func(0) 0 >>> func(1) 1 """ return a ''')) assert 'rel: 5, abs: 11,' in text assert text if __name__ == '__main__': """ CommandLine: export PYTHONPATH=$PYTHONPATH:/home/joncrall/code/xdoctest/tests python ~/code/xdoctest/tests/test_traceback.py pytest ~/code/xdoctest/tests/test_traceback.py -s """ import xdoctest xdoctest.doctest_module(__file__) Erotemic-xdoctest-fac8308/tox.ini000066400000000000000000000007351505122333300170470ustar00rootroot00000000000000# tox (https://tox.readthedocs.io/) is a tool for running tests # in multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, "pip install tox" # and then run "tox" from this directory. [tox] ;envlist = pypy3, py27, py35 envlist = pypy, py27, py34, py35 [testenv] commands = pytest # {posargs: tests} # isort --check-only --diff --recursive --skip .tox --skip migrations # flake8 deps = -r requirements.txt