pax_global_header00006660000000000000000000000064147743371600014526gustar00rootroot0000000000000052 comment=1ceee48b9aa3a947427608a01e3d8fb2376e9796 breezy-3.3.11/000077500000000000000000000000001477433716000131135ustar00rootroot00000000000000breezy-3.3.11/.bzrignore000066400000000000000000000025721477433716000151230ustar00rootroot00000000000000*.py[oc] # These are created as byproducts of our test suite ./test*.tmp ./.python-eggs ./breezy.egg-info ./.bzr.log # Generated files CHANGELOG # generated documents brz.1 MANIFEST ./doc/*.html ./doc/*/_build/ ./doc/*/Makefile ./doc/*/make.bat ./tutorial.html ./build_doc_website ./html_docs ./pretty_docs ./api ./doc/**/*.html ./doc/developers/performance.png ./doc/en/user-reference/*.txt ./doc/en/release-notes/index.txt ./doc/en/release-notes/NEWS.txt BRANCH-INFO # setup.py working directory ./build ./build-win32 ./breezy/locale # Editor temporary/working/backup files *$ .*.sw[nop] .sw[nop] *~ [#]*# .#* ./tags ./breezy/tags ./TAGS # The shelf plugin uses this dir ./.shelf # Mac droppings .DS_Store # win32 installer generated files ./doc/*.html ./doc/brz_man.txt ./py2exe.log ./tools/win32/bzr.iss ./dist # performance history data file ./.perf_history # Pyrex breezy/_annotator_pyx.c breezy/_bencode_pyx.c breezy/bzr/_btree_serializer_pyx.c breezy/bzr/_chk_map_pyx.c breezy/_chunks_to_lines_pyx.c breezy/bzr/_dirstate_helpers_pyx.c breezy/bzr/_groupcompress_pyx.c breezy/bzr/_knit_load_data_pyx.c breezy/bzr/_rio_pyx.c breezy/_known_graph_pyx.c breezy/_readdir_pyx.c # built extension modules breezy/_*.dll breezy/_*.so breezy/_*.pyd ./.ccache .testrepository selftest.log .coverage doc/developers/api/*.txt __pycache__ .mypy_cache # rust bits ./target ./Cargo.lock ./brz locale .ruff_cache breezy-3.3.11/.coveragerc000066400000000000000000000001341477433716000152320ustar00rootroot00000000000000[run] branch = True source = breezy [report] exclude_lines = raise NotImplementedError breezy-3.3.11/.dockerignore000066400000000000000000000000161477433716000155640ustar00rootroot00000000000000Dockerfile *~ breezy-3.3.11/.flake8000066400000000000000000000011361477433716000142670ustar00rootroot00000000000000[flake8] # Ignore E402 ("module level import not at top of file"), # because even with the lazy import plugin it still triggers # for lazy_import statements before other imports. exclude = .git,__pycache__,build,dist,target,.eggs,lib ignore = D I E12 E203 E261 E265 E266 E301 E302 E303 E305 E306 E401 E402 E501 E502 E702 E704 E722 E731 E741 F401 F402 F403 F405 F811 F812 F821 F841 W391 W503 W504 W605 filename = *.py [flake8:local-plugins] extension = MC1 = flake8_lazy_import:LazyImport paths = ./tools/ breezy-3.3.11/.github/000077500000000000000000000000001477433716000144535ustar00rootroot00000000000000breezy-3.3.11/.github/dependabot.yaml000066400000000000000000000012221477433716000174410ustar00rootroot00000000000000# Keep GitHub Actions up to date with GitHub's Dependabot... 
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem version: 2 updates: - package-ecosystem: "cargo" directory: "/" schedule: interval: "weekly" rebase-strategy: "disabled" - package-ecosystem: "github-actions" directory: "/" schedule: interval: weekly - package-ecosystem: "pip" directory: "/" schedule: interval: weekly breezy-3.3.11/.github/gpg-error-config000077500000000000000000000005601477433716000175510ustar00rootroot00000000000000#!/bin/sh # gpg-error-config: simple replacement gpg-error-config that is a shim # for pkg-config. # Parse flags for arg in "$@"; do case $arg in --cflags) pkg-config --cflags gpg-error ;; --libs) pkg-config --libs gpg-error ;; --version) pkg-config --modversion gpg-error ;; *) echo "Unknown option: $arg" >&2 exit 1 ;; esac done breezy-3.3.11/.github/gpgme-config000077500000000000000000000007771477433716000167560ustar00rootroot00000000000000#!/bin/bash # Parse gpgme-config-like flags, then invoke `pkg-config gpgme`: # * Pass --cflags and --libs through # * Map --version to --modversion # * Ignore --thread=pthread # Parse flags for arg in "$@"; do case "$arg" in --cflags|--libs|--modversion) flags="$flags $arg" ;; --version) flags="$flags --modversion" ;; --thread=pthread) ;; --prefix) flags="$flags --variable=prefix" ;; *) echo "Unknown flag: $arg" >&2 exit 1 ;; esac done exec pkg-config gpgme $flags breezy-3.3.11/.github/workflows/000077500000000000000000000000001477433716000165105ustar00rootroot00000000000000breezy-3.3.11/.github/workflows/disperse.yml000066400000000000000000000002741477433716000210540ustar00rootroot00000000000000--- name: Disperse configuration "on": - push jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jelmer/action-disperse-validate@v2 breezy-3.3.11/.github/workflows/pythonpackage.yml000066400000000000000000000060371477433716000220760ustar00rootroot00000000000000name: Python package on: [push, pull_request] env: PYO3_USE_ABI3_FORWARD_COMPATIBILITY: "1" jobs: build: runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest] python-version: [3.9, "3.10", "3.11", '3.12', '3.13'] # See https://github.com/actions/toolkit/issues/399 # include: # - os: ubuntu-latest # python-version: pypy3 # experimental: true fail-fast: false steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies (apt) run: | sudo apt install quilt if: "matrix.os == 'ubuntu-latest'" - name: Install dependencies (brew) run: | brew install quilt if: "matrix.os == 'macos-latest'" - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install -U pip "setuptools>=60" setuptools-gettext cython setuptools-rust python -m pip install -U pip coverage codecov cython testscenarios git+https://github.com/dulwich/dulwich python -m pip install ".[dev,paramiko,doc,launchpad,git,fastimport]" - name: Install dependencies (linux/pip) run: | sudo apt install libgpgme-dev libgpg-error-dev mkdir -p "$HOME/.local/bin" cp .github/gpgme-config "$HOME/.local/bin/gpgme-config" cp .github/gpg-error-config "$HOME/.local/bin/gpg-error-config" echo "$HOME/.local/bin" >> $GITHUB_PATH pip install pyinotify pip install ".[workspace,pgp]" if: "matrix.os == 'ubuntu-latest'" - name: Build 
docs run: | make docs PYTHON=python - name: Build extensions run: | make extensions PYTHON=python if: "matrix.python-version != 'pypy3'" - name: Test suite run (Linux) run: | ./brz selftest env: PYTHONHASHSEED: random BRZ_PLUGIN_PATH: -site:-user PYTHONPATH: . if: "matrix.os == 'ubuntu-latest'" - name: Test suite run (Mac OS) run: | ./brz selftest --subunit2 | subunit-filter --fixup-expected-failures=xfail.macos -s --passthrough | subunit2pyunit env: PYTHONHASHSEED: random BRZ_PLUGIN_PATH: -site:-user PYTHONPATH: . if: "matrix.os == 'macos-latest'" - name: Test suite run (Windows) run: | python -m breezy selftest --subunit2 | subunit-filter --fixup-expected-failures=xfail.windows -s --passthrough | subunit2pyunit env: PYTHONHASHSEED: random BRZ_PLUGIN_PATH: -site:-user PYTHONPATH: . shell: bash if: "matrix.os == 'windows-latest'" - name: Run mypy run: | python -m pip install mypy types-PyYAML types-paramiko types-setuptools python -m mypy breezy if: "matrix.python-version != 'pypy3'" breezy-3.3.11/.github/workflows/wheels.yaml000066400000000000000000000142121477433716000206630ustar00rootroot00000000000000name: Build Python Wheels on: push: pull_request: schedule: - cron: "0 6 * * *" # Daily 6AM UTC build jobs: define-matrix: runs-on: ubuntu-latest outputs: matrix: ${{ steps.merged-identifiers.outputs.merged-identifiers }} steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: 3.x cache: pip - name: Install jq run: sudo apt-get update && sudo apt-get install -y jq - name: Install cibuildwheel run: pip install cibuildwheel - name: Find build identifiers using cibuildwheel --print-build-identifiers id: all-build-identifiers run: | echo "linux=$(cibuildwheel --platform linux --print-build-identifiers | tr '\n' ' ')" >> $GITHUB_OUTPUT echo "macos=$(cibuildwheel --platform macos --print-build-identifiers | tr '\n' ' ')" >> $GITHUB_OUTPUT echo "windows=$(cibuildwheel --platform windows --print-build-identifiers | tr '\n' ' ')" >> $GITHUB_OUTPUT - name: Select build identifiers id: select-build-identifiers run: | if [[ "$GITHUB_REF" = "refs/heads/main" ]] || [[ "$GITHUB_REF" = "refs/heads/master" ]] || [[ "$GITHUB_REF" = "refs/tags/"* ]]; then echo 'linux=${{ steps.all-build-identifiers.outputs.linux }}' >> $GITHUB_OUTPUT echo 'windows=${{ steps.all-build-identifiers.outputs.windows }}' >> $GITHUB_OUTPUT echo 'macos=${{ steps.all-build-identifiers.outputs.macos }}' >> $GITHUB_OUTPUT else echo "linux=$(echo -n '${{ steps.all-build-identifiers.outputs.linux }}' | awk '{print $NF}')" >> $GITHUB_OUTPUT echo "macos=$(echo -n '${{ steps.all-build-identifiers.outputs.macos }}' | awk '{print $NF}')" >> $GITHUB_OUTPUT echo "windows=$(echo -n '${{ steps.all-build-identifiers.outputs.windows }}' | awk '{print $NF}')" >> $GITHUB_OUTPUT fi - name: Output build identifiers id: json-identifiers run: | echo "linux=$(echo -n '${{ steps.select-build-identifiers.outputs.linux }}' | jq -R -s -c 'split(" ") | map(select(length > 0)) | [.[] | {os: "ubuntu-latest", "build-identifier": .}]')" >> $GITHUB_OUTPUT echo "macos=$(echo -n '${{ steps.select-build-identifiers.outputs.macos }}' | jq -R -s -c 'split(" ") | map(select(length > 0)) | [.[] | {os: "macos-latest", "build-identifier": .}]')" >> $GITHUB_OUTPUT echo "windows=$(echo -n '${{ steps.select-build-identifiers.outputs.windows }}' | jq -R -s -c 'split(" ") | map(select(length > 0)) | [.[] | {os: "windows-latest", "build-identifier": .}]')" >> $GITHUB_OUTPUT - name: Merge build identifiers id: merged-identifiers run: | echo 
merged-identifiers=$(echo -n '${{ steps.json-identifiers.outputs.linux }} ${{ steps.json-identifiers.outputs.macos }} ${{ steps.json-identifiers.outputs.windows }}' | jq -c -s 'add') >> $GITHUB_OUTPUT build-wheels: runs-on: ${{ matrix.os }} needs: define-matrix strategy: matrix: include: ${{ fromJSON(needs.define-matrix.outputs.matrix ) }} fail-fast: true steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 - name: Install native dependencies (Ubuntu) run: sudo apt-get update && sudo apt-get install -y libgpgme-dev libgpg-error-dev if: "matrix.os == 'ubuntu-latest'" - name: set up rust if: matrix.os != 'ubuntu' uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: nightly override: true - name: Install native dependencies (MacOS) run: brew install swig gpgme if: "matrix.os == 'macos-latest'" - name: Install dependencies run: | python -m pip install --upgrade pip pip install setuptools wheel cibuildwheel - name: Provide gpgme-config and gpg-error-config if: "matrix.os == 'ubuntu-latest'" run: | mkdir -p "$HOME/.local/bin" cp .github/gpgme-config "$HOME/.local/bin/gpgme-config" cp .github/gpg-error-config "$HOME/.local/bin/gpg-error-config" echo "$HOME/.local/bin" >> $GITHUB_PATH - name: Install gpg on supported platforms run: pip install -U gpg if: "matrix.os != 'windows-latest'" - name: Set up QEMU uses: docker/setup-qemu-action@v3 if: "matrix.os == 'ubuntu-latest'" - name: Build wheels run: python -m cibuildwheel --output-dir wheelhouse env: CIBW_ARCHS_MACOS: x86_64 arm64 universal2 CIBW_BUILD: "${{ matrix.build-identifier }}*" CIBW_ENVIRONMENT: 'PATH="$HOME/.cargo/bin:$PATH" PYO3_USE_ABI3_FORWARD_COMPATIBILITY="1" MACOSX_DEPLOYMENT_TARGET="10.12"' CIBW_BEFORE_BUILD: > pip install -U setuptools-rust && curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain=nightly --profile=minimal -y && rustup show CIBW_BEFORE_BUILD_LINUX: > pip install -U setuptools-rust && yum install libatomic -y && curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain=nightly --profile=minimal -y && rustup show CIBW_BEFORE_BUILD_MACOS: > pip install -U setuptools-rust && curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain=nightly --profile=minimal -y && rustup target add x86_64-apple-darwin && rustup show - name: Upload wheels uses: actions/upload-artifact@v4 with: name: artifact-${{ matrix.build-identifier }} path: ./wheelhouse/*.whl publish: runs-on: ubuntu-latest needs: build-wheels if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/brz-') permissions: id-token: write environment: name: pypi url: https://pypi.org/p/breezy steps: - name: Download distributions uses: actions/download-artifact@v4 with: merge-multiple: true pattern: artifact-* path: dist - name: Publish package distributions to PyPI uses: pypa/gh-action-pypi-publish@release/v1 breezy-3.3.11/.gitignore000066400000000000000000000004741477433716000151100ustar00rootroot00000000000000__pycache__ *.pyc build/ *_pyx.so *_pyx.c *_pyx.h *_pyx_api.h *_pyx.cpython-*.so *_c.cpython-*.so *_c.so *_pyx.cpython-*.c *~ /target /brz /Cargo.lock *.cpython-*.so .testrepository selftest.log doc/en/user-reference/*.txt doc/en/_build/ doc/developers/_build/ .*.swp doc/developers/Makefile doc/developers/make.bat breezy-3.3.11/.mailmap000066400000000000000000000004531477433716000145360ustar00rootroot00000000000000Jelmer Vernooij Jelmer Vernooij Jelmer Vernooij INADA Naoki Martin Packman breezy-3.3.11/.rsyncexclude000066400000000000000000000003131477433716000156210ustar00rootroot00000000000000*.pyc *.pyo *~ # 
arch can bite me {arch} .arch-ids ,,* ++* /doc/*.html *.tmp bzr-test.log [#]*# .#* testrev.* /tmp # do want this after all + CHANGELOG /build test*.tmp .*.swp *.orig .*.orig .bzr-shelf* breezy-3.3.11/.testr.conf000066400000000000000000000003041477433716000151760ustar00rootroot00000000000000[DEFAULT] test_command=PYTHONPATH=`pwd`:$PYTHONPATH BRZ_PLUGIN_PATH=-site:-user python3 -m breezy selftest --subunit2 $IDOPTION $LISTOPT test_id_option=--load-list $IDFILE test_list_option=--list breezy-3.3.11/BRANCH.TODO000066400000000000000000000002261477433716000145370ustar00rootroot00000000000000# This file is for listing TODOs for branches that are being worked on. # It should ALWAYS be empty in the mainline or in integration branches. # # breezy-3.3.11/CODE_OF_CONDUCT.md000066400000000000000000000064271477433716000157230ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at core@breezy-vcs.org. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq breezy-3.3.11/COPYING.txt000066400000000000000000000432541477433716000147740ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. 
The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. breezy-3.3.11/Cargo.toml000066400000000000000000000012001477433716000150340ustar00rootroot00000000000000[package] name = "breezy" version = { workspace = true } authors = [ "Martin Packman ", "Jelmer Vernooij "] edition = "2018" publish = false default-run = "brz" license = "GPL-2.0+" description = "Friendly distributed version control system" repository = "https://github.com/breezy-team/breezy" homepage = "https://www.breezy-vcs.org/" documentation = "https://www.breezy-vcs.org/doc/" [workspace] members = ["lib-rio"] [workspace.package] version = "3.3.11" [[bin]] name = "brz" path = "breezy/main.rs" [dependencies] pyo3 = { workspace = true } [workspace.dependencies] pyo3 = ">=0.23,<0.24" breezy-3.3.11/Dockerfile000066400000000000000000000010171477433716000151040ustar00rootroot00000000000000FROM debian:sid-slim AS build RUN apt -y update && apt -y install cython3 python3-setuptools python3-setuptools-rust python3-configobj python3-dulwich python3-urllib3 python3-merge3 python3-patiencediff python3-fastbencode python3-yaml COPY . . RUN python3 setup.py install FROM debian:sid RUN apt -y update && apt -y install python3 python3-configobj python3-dulwich python3-urllib3 python3-merge3 python3-patiencediff python3-fastbencode python3-yaml COPY --from=build /usr/local /usr/local ENTRYPOINT ["/usr/local/bin/brz"] breezy-3.3.11/INSTALL000066400000000000000000000043201477433716000141430ustar00rootroot00000000000000Breezy install instructions *************************** Dependencies ------------ Breezy requires Python 3.6 or newer as well as the rust compiler. It also requires the `setuptools`, `setuptools_rust`, `configobj`, `fastbencode` and `patiencediff` Python modules to be installed. These can be installed either from your operating system's package manager, using pip or by downloading them from: configobj: https://github.com/DiffSK/configobj patiencediff: https://github.com/breezy-team/patiencediff fastbencode: https://github.com/breezy-team/fastbencode Optional dependencies ~~~~~~~~~~~~~~~~~~~~~ If you wish to access branches over sftp, you will need paramiko and pycrypto: http://www.lag.net/paramiko/ To PGP sign commits and verify PGP signatures on commits, install python-gpgme. For Git support, install Dulwich: https://www.dulwich.io/ For fastimport support, install python-fastimport: https://github.com/jelmer/python-fastimport brz can optionally use compiled versions of some parts of the code for increased speed. When installing brz you need the ability to build C extensions. 
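For example, building the optional compiled extensions in place from a source tree might be done along these lines (a sketch mirroring this tree's Makefile ``extensions`` target; exact flags can vary between releases):

    python3 setup.py build_ext -i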
Some GNU/Linux distributions package the necessary headers separately from the main Python package. This package is probably named something like python-dev or python-devel. FreeBSD, Windows, source-based GNU/Linux distributions, and possibly other operating systems, have the required files installed by default. If you are installing brz from a brz branch rather than a release tarball, then you should also have the Cython package installed. This is not necessary for release tarballs as they already contain the C files that Cython is needed to create. http://www.cython.org/ Installation ------------ When upgrading using setup.py, it is recommended that you first delete the bzrlib directory from the install target. To install brz as a user, run python setup.py install --home ~ To install system-wide, run (as root) python setup.py install For more information on installation, see for the Bazaar installation FAQ (that also applies to Breezy) or write to bazaar@lists.canonical.com mentioning you use Breezy, or ask a question at . breezy-3.3.11/MANIFEST.in000066400000000000000000000017011477433716000146500ustar00rootroot00000000000000include README.rst setup.py COPYING.txt # FIXME: Not needed, remove after 2.7.0 -- vila 2016-02-07 include BRANCH.TODO INSTALL Makefile MANIFEST.in NEWS profile_imports.py .rsyncexclude .testr.conf TODO tools/brz_epydoc tools/packaging/lp-upload-release tools/subunit-sum breezy/plugins/news_merge/README breezy/plugins/po_merge/README breezy/tests/ssl_certs/ca.key breezy/tests/ssl_certs/server.csr breezy/tests/ssl_certs/server.extensions.cnf SECURITY.md CODE_OF_CONDUCT.md # bzr export still create some empty dirs that need to be removed # breezy/plugins/weave_fmt/tests/ breezy/store/revision/ doc/ja/_templates/ man1/ man1 recursive-include tools/win32 * recursive-include breezy *.py *.pyx *.pxd *.txt *.c *.h *.rs recursive-include tools *.py *.sh recursive-include apport * recursive-include contrib * recursive-include doc * recursive-include po * recursive-include lib-* Cargo.toml *.rs include Cargo.lock include Cargo.toml include breezy/py.typed breezy-3.3.11/Makefile000066400000000000000000000242661477433716000145650ustar00rootroot00000000000000# Copyright (C) 2005-2012, 2016, 2017 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # A relatively simple Makefile to assist in building parts of brz. Mostly for # building documentation, etc. ### Core Stuff ### SHELL=bash PYTHON?=python3 PYTHON3?=python3 BRZ_TARGET=release PLUGIN_TARGET=plugin-release PYTHON_BUILDFLAGS= BRZ_PLUGIN_PATH=-site:-user # Shorter replacement for $(sort $(wildcard )) as $(call sw,) sw = $(sort $(wildcard $(1))) .PHONY: all clean realclean extensions flake8 api-docs check-nodocs check all: extensions extensions: @echo "building extension modules." 
$(PYTHON) setup.py build_ext -i $(PYTHON_BUILDFLAGS) check:: docs check-nodocs check-nodocs: brz -$(RM) -f selftest.log echo `date` ": selftest starts" 1>&2 set -o pipefail; BRZ_PLUGIN_PATH=$(BRZ_PLUGIN_PATH) \ ./brz selftest -Oselftest.timeout=120 --strict \ --subunit2 $(tests) | tee selftest.log | subunit2pyunit echo `date` ": selftest ends" 1>&2 # An empty log file should catch errors in the $(PYTHON3) # command above (the '|' swallow any errors since 'make' # sees the 'tee' exit code for the whole line if [ ! -s selftest.log ] ; then exit 1 ; fi # Check that there were no errors reported. subunit-stats < selftest.log check-ci: docs extensions brz # FIXME: Remove -Wignore::FutureWarning once # https://github.com/paramiko/paramiko/issues/713 is not a concern # anymore -- vila 2017-05-24 set -o pipefail; \ BRZ_PLUGIN_PATH=$(BRZ_PLUGIN_PATH) \ ./brz selftest -v --parallel=fork -Oselftest.timeout=120 --subunit2 brz: $(PYTHON) setup.py build_rust -i $(PYTHON_BUILDFLAGS) # Run Python style checker (apt-get install flake8) # # Note that at present this gives many false warnings, because it doesn't # know about identifiers loaded through lazy_import. flake8: $(PYTHON) -m flake8 breezy mypy: $(PYTHON) -m mypy breezy clean: $(PYTHON) setup.py clean -find . -name "*.pyc" -o -name "*.pyo" -o -name "*.so" | xargs rm -f realclean: clean # Remove files which are autogenerated but included by the tarball. rm -f breezy/*_pyx.c breezy/bzr/*_pyx.c # build tags for emacs and vim TAGS: ctags -R -e breezy tags: ctags -R breezy # these are treated as phony so they'll always be rebuilt - it's pretty quick .PHONY: TAGS tags ### Documentation ### docs: docs-sphinx clean-docs: clean-sphinx html-docs: html-sphinx ### Man-page Documentation ### MAN_DEPENDENCIES = breezy/builtins.py \ $(call sw,breezy/*.py) \ $(call sw,breezy/*/*.py) \ tools/generate_docs.py \ $(call sw,$(addsuffix /*.txt, breezy/help_topics/en)) MAN_PAGES = man1/brz.1 man1/brz.1: $(MAN_DEPENDENCIES) mkdir -p $(dir $@) $(PYTHON) tools/generate_docs.py -o $@ man ### Sphinx-style Documentation ### # Build the documentation. To keep the dependencies down to a minimum # for distro packagers, we only build the html documentation by default. # Sphinx 0.6 or later is preferred for the best rendering, though # Sphinx 0.4 or later should work. See http://sphinx.pocoo.org/index.html # for installation instructions. docs-sphinx: html-sphinx # Clean out generated documentation clean-sphinx: $(MAKE) -C doc/en clean $(MAKE) -C doc/developers clean SPHINX_DEPENDENCIES = \ doc/en/release-notes/index.txt \ doc/en/user-reference/index.txt \ doc/developers/Makefile \ doc/developers/make.bat NEWS_FILES = $(call sw,doc/en/release-notes/brz-*.txt) doc/en/user-reference/index.txt: $(MAN_DEPENDENCIES) LANGUAGE=C $(PYTHON) tools/generate_docs.py -o $@ rstx doc/en/release-notes/index.txt: $(NEWS_FILES) tools/generate_release_notes.py $(PYTHON) tools/generate_release_notes.py $@ $(NEWS_FILES) doc/%/Makefile: doc/en/Makefile $(PYTHON) -c "import shutil; shutil.copyfile('$<', '$@')" doc/%/make.bat: doc/en/make.bat $(PYTHON) -c "import shutil; shutil.copyfile('$<', '$@')" # Build the html docs using Sphinx. html-sphinx: $(SPHINX_DEPENDENCIES) $(MAKE) -C doc/en html $(MAKE) -C doc/developers api html # Build the PDF docs using Sphinx. This requires numerous LaTeX # packages. See http://sphinx.pocoo.org/builders.html for details. 
# Note: We don't currently build PDFs for the Russian docs because # they require additional packages to be installed (to handle # Russian hyphenation rules, etc.) pdf-sphinx: $(SPHINX_DEPENDENCIES) $(MAKE) -C doc/en latex $(MAKE) -C doc/developers latex $(MAKE) -C doc/en/_build/latex all-pdf $(MAKE) -C doc/developers/_build/latex all-pdf # Build the CHM (Windows Help) docs using Sphinx. # Note: HtmlHelp Workshop needs to be used on the generated hhp files # to generate the final chm files. chm-sphinx: $(SPHINX_DEPENDENCIES) $(MAKE) -C doc/en htmlhelp $(MAKE) -C doc/developers htmlhelp # Build the texinfo files using Sphinx. texinfo-sphinx: $(SPHINX_DEPENDENCIES) $(MAKE) -C doc/en texinfo $(MAKE) -C doc/developers texinfo ### Documentation Website ### # Where to build the website DOC_WEBSITE_BUILD = build_doc_website # Build and package docs into a website, complete with downloads. doc-website: html-sphinx pdf-sphinx $(PYTHON) tools/package_docs.py doc/en $(DOC_WEBSITE_BUILD) $(PYTHON) tools/package_docs.py doc/developers $(DOC_WEBSITE_BUILD) ### Miscellaneous Documentation Targets ### # build a png of our performance task list # this is no longer built by default; you can build it if you want to look at it doc/developers/performance.png: doc/developers/performance.dot @echo Generating $@ @dot -Tpng $< -o$@ || echo "Dot not installed; skipping generation of $@" ### Windows Support ### # make all the installers completely from scratch, using zc.buildout # to fetch the dependencies # These are files that need to be copied into the build location to bootstrap # the build process. # Note that the path is relative to tools/win32 BUILDOUT_FILES = buildout.cfg \ buildout-templates/bin/build-installer.bat.in \ ostools.py bootstrap.py installer-all: @echo Make all the installers from scratch @# Build everything in a separate directory, to avoid cluttering the WT $(PYTHON) tools/win32/ostools.py makedir build-win32 @# cd to tools/win32 so that the relative paths are copied correctly cd tools/win32 && $(PYTHON) ostools.py copytree $(BUILDOUT_FILES) ../../build-win32 @# There seems to be a bug in gf.release.brz: it doesn't correctly update @# existing release directories, so delete them manually before building. @# It means things may be rebuilt that don't need to be, but at least @# it will be correct when they do. cd build-win32 && $(PYTHON) ostools.py remove release */release cd build-win32 && $(PYTHON) bootstrap.py cd build-win32 && bin/buildout cd build-win32 && bin/build-installer.bat $(BRZ_TARGET) $(PLUGIN_TARGET) clean-installer-all: $(PYTHON) tools/win32/ostools.py remove build-win32 # make brz.exe for win32 with py2exe exe: @echo *** Make brz.exe $(PYTHON) tools/win32/ostools.py remove breezy/*.pyd $(PYTHON) setup.py build_ext -i -f $(PYTHON_BUILDFLAGS) $(PYTHON) setup.py py2exe > py2exe.log $(PYTHON) tools/win32/ostools.py copytodir tools/win32/start_brz.bat win32_brz.exe $(PYTHON) tools/win32/ostools.py copytodir tools/win32/breezy.url win32_brz.exe # win32 installer for brz.exe installer: exe copy-docs @echo *** Make Windows installer $(PYTHON) tools/win32/run_script.py cog.py -d -o tools/win32/brz.iss tools/win32/brz.iss.cog iscc /Q tools/win32/brz.iss py-inst-37: docs $(PYTHON37) setup.py bdist_wininst --install-script="brz-win32-bdist-postinstall.py" -d . 
python-installer: py-inst-37 copy-docs: docs $(PYTHON) tools/win32/ostools.py copytodir README win32_brz.exe/doc $(PYTHON) tools/win32/ostools.py copydir doc/en/_build/html win32_brz.exe/doc $(PYTHON) tools/win32/ostools.py copydir doc/developers/_build/html win32_brz.exe/doc/developers # clean on win32 all installer-related files and directories clean-win32: clean-docs $(PYTHON) tools/win32/ostools.py remove build $(PYTHON) tools/win32/ostools.py remove win32_brz.exe $(PYTHON) tools/win32/ostools.py remove py2exe.log $(PYTHON) tools/win32/ostools.py remove tools/win32/brz.iss $(PYTHON) tools/win32/ostools.py remove brz-setup*.exe $(PYTHON) tools/win32/ostools.py remove brz-*win32.exe $(PYTHON) tools/win32/ostools.py remove dist # i18n targets .PHONY: update-pot po/brz.pot update-pot: po/brz.pot TRANSLATABLE_PYFILES:=$(shell find breezy -name '*.py' \ | grep -v 'breezy/tests/' \ | grep -v 'breezy/doc' \ ) po/brz.pot: $(PYFILES) $(DOCFILES) ./brz export-pot --include-duplicates > po/brz.pot echo $(TRANSLATABLE_PYFILES) | xargs \ xgettext --package-name "brz" \ --msgid-bugs-address "<bazaar@canonical.com>" \ --copyright-holder "Breezy Developers" \ --from-code UTF-8 --join --sort-by-file --add-comments=i18n: \ -d bzr -p po -o brz.pot ### Packaging Targets ### .PHONY: dist check-dist-tarball # build a distribution source tarball dist: version=`./brz version --short` && \ echo Building distribution of brz $$version && \ expbasedir=`mktemp -t -d tmp_brz_dist.XXXXXXXXXX` && \ expdir=$$expbasedir/brz-$$version && \ tarball=$$PWD/../breezy-$$version.tar.gz && \ $(MAKE) clean && \ $(MAKE) && \ $(PYTHON) setup.py sdist -d $$PWD/.. && \ gpg --detach-sign --armor $$tarball && \ rm -rf $$expbasedir # run all tests in a previously built tarball check-dist-tarball: tmpdir=`mktemp -t -d tmp_brz_check_dist.XXXXXXXXXX` && \ version=`./brz version --short` && \ tarball=$$PWD/../breezy-$$version.tar.gz && \ tar Cxz $$tmpdir -f $$tarball && \ $(MAKE) -C $$tmpdir/breezy-$$version check && \ rm -rf $$tmpdir reformat: ruff format breezy check:: check-formatting check-formatting: ruff format --check breezy .testrepository: testr init testr: .testrepository all testr run --parallel breezy-3.3.11/NEWS000066400000000000000000000002151477433716000136100ustar00rootroot00000000000000The NEWS file has been moved and split into multiple files (one per release series). The NEWS files are now found in doc/en/release-notes/. breezy-3.3.11/README.rst000066400000000000000000000046261477433716000146110ustar00rootroot00000000000000Breezy (``brz``) is a decentralized revision control system, designed to be easy for developers and end users alike. By default, Breezy provides support for both the `Bazaar `_ and `Git `_ file formats. You can install from source by following the instructions in the INSTALL file. At the time of writing there are no binary packages available. To learn how to use Breezy, see the official documentation in the `doc` directory or refer to the Bazaar documentation at . Breezy is Free Software, and is released under the GNU General Public License, version 2 or later. Breezy is a friendly fork of the Bazaar (``bzr``) project, hosted on http://bazaar.canonical.com/. It is backwards compatible with Bazaar's disk format and protocols. One of the key differences from Bazaar is that Breezy runs on Python 3, rather than on Python 2. Breezy highlights ================= Breezy directly supports both central version control (like cvs/svn) and distributed version control (like git/hg).
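For example, a first session with Breezy might look like this (a minimal sketch using standard ``brz`` subcommands; output and defaults vary by version)::

    $ brz init myproject
    $ cd myproject
    $ echo "hello" > README
    $ brz add README
    $ brz commit -m "Add a README"
    $ brz log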
Developers can organize their workspace in whichever way they want on a per-project basis, including:

* checkouts (like svn)
* feature branches (like hg)
* shared working tree (like git).

It also directly supports and encourages a large number of development best practices like refactoring and pre-commit regression testing. Users can choose between our command line tool and our cross-platform GUI application. For further details, see our website. Feedback ======== If you encounter any problems with Breezy, need help understanding it, or would like to offer suggestions or feedback, please get in touch with us:

* Ask a question through our web support interface, at https://answers.launchpad.net/brz/
* Report bugs at https://bugs.launchpad.net/brz/+filebug
* Write to the mailing list at bazaar@lists.canonical.com You can join the list at . You don't need to subscribe to post, but your first post will be held briefly for manual moderation. Please mention that you are using Breezy rather than Bazaar.
* Talk to us in irc://irc.oftc.net/breezy or `#breezy:matrix.org `_.

Our mission is to make a version control tool that developers LOVE to use and that casual contributors feel confident with. Please let us know how we're going. The Breezy Team breezy-3.3.11/SECURITY.md000066400000000000000000000004321477433716000147030ustar00rootroot00000000000000# Security Policy

## Supported Versions

| Version  | Supported          |
| -------- | ------------------ |
| 3.1.x    | :white_check_mark: |
| 3.0.x    | :x:                |

## Reporting a Vulnerability

Please report security issues by e-mail to breezy-core@googlegroups.com. breezy-3.3.11/TODO000066400000000000000000000001151477433716000136000ustar00rootroot00000000000000For things to do in Breezy development, see https://bugs.launchpad.net/brz/ breezy-3.3.11/apport/000077500000000000000000000000001477433716000144205ustar00rootroot00000000000000breezy-3.3.11/apport/README000066400000000000000000000006171477433716000153040ustar00rootroot00000000000000Bazaar supports semi-automatic bug reporting through Apport . If apport is not installed, an exception is printed to stderr in the usual way. For this to work properly it's suggested that two files be installed when a package of brz is installed: ``brz.conf`` into ``/etc/apport/crashdb.conf.d`` ``source_brz.py`` into ``/usr/share/apport/package-hooks`` breezy-3.3.11/apport/brz-crashdb.conf000066400000000000000000000002561477433716000174730ustar00rootroot00000000000000brz = { # most brz bugs are upstream bugs; file them there 'impl': 'launchpad', 'project': 'brz', 'bug_pattern_base': 'http://people.canonical.com/~pitti/bugpatterns', } breezy-3.3.11/apport/source_brz.py000066400000000000000000000027051477433716000171530ustar00rootroot00000000000000"""apport package hook for Breezy.""" # Copyright (c) 2009, 2010 Canonical Ltd. 
# Author: Matt Zimmerman # and others import os from apport.hookutils import * # noqa: F403 brz_log = os.path.expanduser("~/.brz.log") dot_brz = os.path.expanduser("~/.config/breezy") def _add_log_tail(report): # may have already been added in-process if "BrzLogTail" in report: return with open(brz_log) as f: brz_log_lines = f.readlines() brz_log_lines.reverse() brz_log_tail = [] blanks = 0 for line in brz_log_lines: if line == "\n": blanks += 1 brz_log_tail.append(line) if blanks >= 2: break brz_log_tail.reverse() report["BrzLogTail"] = "".join(brz_log_tail) def add_info(report): _add_log_tail(report) if "BrzPlugins" not in report: # may already be present in-process report["BrzPlugins"] = command_output(["brz", "plugins", "-v"]) # by default assume brz crashes are upstream bugs; this relies on # having a brz entry under /etc/apport/crashdb.conf.d/ report["CrashDB"] = "brz" # these may contain some sensitive info (smtp_passwords) # TODO: strip that out and attach the rest # attach_file_if_exists(report, # os.path.join(dot_brz, 'breezy.conf', 'BrzConfig') # attach_file_if_exists(report, # os.path.join(dot_brz, 'locations.conf', 'BrzLocations') # vim: expandtab shiftwidth=4 breezy-3.3.11/breezy/000077500000000000000000000000001477433716000144135ustar00rootroot00000000000000breezy-3.3.11/breezy/__init__.py000066400000000000000000000211771477433716000165340ustar00rootroot00000000000000# Copyright (C) 2005-2013, 2016, 2017 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """All of Breezy. Developer documentation is available at https://www.breezy-vcs.org/developers/. Some particularly interesting things in breezy are: * breezy.initialize -- setup the library for use * breezy.plugin.load_plugins -- load all installed plugins * breezy.branch.Branch.open -- open a branch * breezy.workingtree.WorkingTree.open -- open a working tree We hope you enjoy this library. """ import time # Keep track of when breezy was first imported, so that we can give rough # timestamps relative to program start in the log file kept by breezy.trace. _start_time = time.time() import codecs import sys __copyright__ = ( "Copyright 2005-2012 Canonical Ltd.\nCopyright 2017-2025 Breezy developers" ) # same format as sys.version_info: "A tuple containing the five components of # the version number: major, minor, micro, releaselevel, and serial. All # values except releaselevel are integers; the release level is 'alpha', # 'beta', 'candidate', or 'final'. The version_info value corresponding to the # Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a # releaselevel of 'dev' for unreleased under-development code. version_info = (3, 3, 11, "final", 0) def _format_version_tuple(version_info): """Turn a version number 2, 3 or 5-tuple into a short string. This format matches and the typical presentation used in Python output. 
This also checks that the version is reasonable: the sub-release must be zero for final releases. >>> print(_format_version_tuple((1, 0, 0, 'final', 0))) 1.0.0 >>> print(_format_version_tuple((1, 2, 0, 'dev', 0))) 1.2.0.dev >>> print(_format_version_tuple((1, 2, 0, 'dev', 1))) 1.2.0.dev1 >>> print(_format_version_tuple((1, 1, 1, 'candidate', 2))) 1.1.1.rc2 >>> print(_format_version_tuple((2, 1, 0, 'beta', 1))) 2.1.b1 >>> print(_format_version_tuple((1, 4, 0))) 1.4.0 >>> print(_format_version_tuple((1, 4))) 1.4 >>> print(_format_version_tuple((2, 1, 0, 'final', 42))) 2.1.0.42 >>> print(_format_version_tuple((1, 4, 0, 'wibble', 0))) 1.4.0.wibble.0 """ if len(version_info) == 2: main_version = f"{version_info[0]}.{version_info[1]}" else: main_version = f"{version_info[0]}.{version_info[1]}.{version_info[2]}" if len(version_info) <= 3: return main_version release_type = version_info[3] sub = version_info[4] if release_type == "final" and sub == 0: sub_string = "" elif release_type == "final": sub_string = "." + str(sub) elif release_type == "dev" and sub == 0: sub_string = ".dev" elif release_type == "dev": sub_string = ".dev" + str(sub) elif release_type in ("alpha", "beta"): if version_info[2] == 0: main_version = f"{version_info[0]}.{version_info[1]}" sub_string = "." + release_type[0] + str(sub) elif release_type == "candidate": sub_string = ".rc" + str(sub) else: return ".".join(map(str, version_info)) return main_version + sub_string __version__ = _format_version_tuple(version_info) version_string = __version__ _core_version_string = ".".join(map(str, version_info[:3])) def _patch_filesystem_default_encoding(new_enc): """Change the Python process global encoding for filesystem names. The effect is to change how open() and other builtin functions handle unicode filenames on posix systems. This should only be done near startup. The new encoding string passed to this function must survive until process termination, otherwise the interpreter may access uninitialized memory. The use of intern() may defer breakage is but is not enough, the string object should be secure against module reloading and during teardown. """ try: import ctypes pythonapi = getattr(ctypes, "pythonapi", None) if pythonapi is not None: old_ptr = ctypes.c_void_p.in_dll(pythonapi, "Py_FileSystemDefaultEncoding") has_enc = ctypes.c_int.in_dll(pythonapi, "Py_HasFileSystemDefaultEncoding") as_utf8 = ctypes.PYFUNCTYPE( ctypes.POINTER(ctypes.c_char), ctypes.py_object )(("PyUnicode_AsUTF8", pythonapi)) except (ImportError, ValueError): return # No ctypes or not CPython implementation, do nothing new_enc = sys.intern(new_enc) enc_ptr = as_utf8(new_enc) has_enc.value = 1 old_ptr.value = ctypes.cast(enc_ptr, ctypes.c_void_p).value if sys.getfilesystemencoding() != new_enc: raise RuntimeError("Failed to change the filesystem default encoding") return new_enc # When running under the brz script, override bad filesystem default encoding. # This is not safe to do for all users of breezy, other scripts should instead # just ensure a usable locale is set via the $LANG variable on posix systems. _fs_enc = sys.getfilesystemencoding() if getattr(sys, "_brz_default_fs_enc", None) is not None: if _fs_enc is None or codecs.lookup(_fs_enc).name == "ascii": _fs_enc = _patch_filesystem_default_encoding(sys._brz_default_fs_enc) # type: ignore if _fs_enc is None: _fs_enc = "ascii" else: _fs_enc = codecs.lookup(_fs_enc).name # brz has various bits of global state that are slowly being eliminated. 
# This variable is intended to permit any new state-like things to be attached # to a library_state.BzrLibraryState object rather than getting new global # variables that need to be hunted down. Accessing the current BzrLibraryState # through this variable is not encouraged: it is better to pass it around as # part of the context of an operation than to look it up directly, but when # that is too hard, it is better to use this variable than to make a brand new # global variable. # If using this variable by looking it up (because it can't be easily obtained) # it is important to store the reference you get, rather than looking it up # repeatedly; that way your code will behave properly in the breezy test suite # and from programs that do use multiple library contexts. _global_state = None def initialize(setup_ui=True, stdin=None, stdout=None, stderr=None): """Set up everything needed for normal use of breezy. Most applications that embed breezy, including brz itself, should call this function to initialize various subsystems. More options may be added in future so callers should use named arguments. The object returned by this function can be used as a contex manager through the 'with' statement to automatically shut down when the process is finished with breezy. However it's not necessary to separately enter the context as well as starting brz: breezy is ready to go when this function returns. :param setup_ui: If true (default) use a terminal UI; otherwise some other ui_factory must be assigned to `breezy.ui.ui_factory` by the caller. :param stdin, stdout, stderr: If provided, use these for terminal IO; otherwise use the files in `sys`. :return: A context manager for the use of breezy. The __exit__ should be called by the caller before exiting their process or otherwise stopping use of breezy. Advanced callers can use BzrLibraryState directly. """ from breezy import library_state, trace if setup_ui: import breezy.ui stdin = stdin or sys.stdin stdout = stdout or sys.stdout stderr = stderr or sys.stderr ui_factory = breezy.ui.make_ui_for_terminal(stdin, stdout, stderr) else: ui_factory = None tracer = trace.DefaultConfig() state = library_state.BzrLibraryState(ui=ui_factory, trace=tracer) # Start automatically in case people don't realize this returns a context. state._start() return state def get_global_state(): if _global_state is None: return initialize() return _global_state def test_suite(): import tests return tests.test_suite() breezy-3.3.11/breezy/__main__.py000066400000000000000000000053251477433716000165120ustar00rootroot00000000000000# Copyright (C) 2005-2013, 2016, 2017 Canonical Ltd # Copyright (C) 2018-2020 Breezy Developers # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Breezy -- a free distributed version-control tool.""" import os import sys profiling = False if "--profile-imports" in sys.argv: import profile_imports profile_imports.install() profiling = True if os.name == "posix": import locale try: locale.setlocale(locale.LC_ALL, "") except locale.Error as e: sys.stderr.write( "brz: warning: {}\n" " bzr could not set the application locale.\n" " Although this should be no problem for bzr itself, it might\n" " cause problems with some plugins. To investigate the issue,\n" " look at the output of the locale(1p) tool.\n".format(e) ) # Use better default than ascii with posix filesystems that deal in bytes # natively even when the C locale or no locale at all is given. Note that # we need an immortal string for the hack, hence the lack of a hyphen. sys._brz_default_fs_enc = "utf8" # type: ignore def main(): import breezy.breakin breezy.breakin.hook_debugger_to_signal() import breezy.commands import breezy.trace with breezy.initialize(): exit_val = breezy.commands.main() if profiling: profile_imports.log_stack_info(sys.stderr) # By this point we really have completed everything we want to do, and # there's no point doing any additional cleanup. Abruptly exiting here # stops any background threads getting into trouble as code is unloaded, # and it may also be slightly faster, through avoiding gc of objects that # are just about to be discarded anyhow. This does mean that atexit hooks # won't run but we don't use them. Also file buffers won't be flushed, # but our policy is to always close files from a finally block. -- mbp 20070215 exitfunc = getattr(sys, "exitfunc", None) if exitfunc is not None: exitfunc() os._exit(exit_val) if __name__ == "__main__": main() breezy-3.3.11/breezy/_annotator_py.py000066400000000000000000000321101477433716000176360ustar00rootroot00000000000000# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Functionality for doing annotations in the 'optimal' way.""" from . import errors, osutils, ui from . 
import graph as _mod_graph class Annotator: """Class that drives performing annotations.""" def __init__(self, vf): """Create a new Annotator from a VersionedFile.""" self._vf = vf self._parent_map = {} self._text_cache = {} # Map from key => number of nexts that will be built from this key self._num_needed_children = {} self._annotations_cache = {} self._heads_provider = None self._ann_tuple_cache = {} def _update_needed_children(self, key, parent_keys): for parent_key in parent_keys: if parent_key in self._num_needed_children: self._num_needed_children[parent_key] += 1 else: self._num_needed_children[parent_key] = 1 def _get_needed_keys(self, key): """Determine the texts we need to get from the backing vf. :return: (vf_keys_needed, ann_keys_needed) vf_keys_needed These are keys that we need to get from the vf ann_keys_needed Texts which we have in self._text_cache but we don't have annotations for. We need to yield these in the proper order so that we can get proper annotations. """ parent_map = self._parent_map # We need 1 extra copy of the node we will be looking at when we are # done self._num_needed_children[key] = 1 vf_keys_needed = set() ann_keys_needed = set() needed_keys = {key} while needed_keys: parent_lookup = [] next_parent_map = {} for key in needed_keys: if key in self._parent_map: # We don't need to lookup this key in the vf if key not in self._text_cache: # Extract this text from the vf vf_keys_needed.add(key) elif key not in self._annotations_cache: # We do need to annotate ann_keys_needed.add(key) next_parent_map[key] = self._parent_map[key] else: parent_lookup.append(key) vf_keys_needed.add(key) needed_keys = set() next_parent_map.update(self._vf.get_parent_map(parent_lookup)) for key, parent_keys in next_parent_map.items(): if parent_keys is None: # No graph versionedfile parent_keys = () next_parent_map[key] = () self._update_needed_children(key, parent_keys) needed_keys.update( [key for key in parent_keys if key not in parent_map] ) parent_map.update(next_parent_map) # _heads_provider does some graph caching, so it is only valid # while self._parent_map hasn't changed self._heads_provider = None return vf_keys_needed, ann_keys_needed def _get_needed_texts(self, key, pb=None): """Get the texts we need to properly annotate key. :param key: A Key that is present in self._vf :return: Yield (this_key, text, num_lines) 'text' is an opaque object that just has to work with whatever matcher object we are using. Currently it is always 'lines' but future improvements may change this to a simple text string. """ keys, ann_keys = self._get_needed_keys(key) if pb is not None: pb.update("getting stream", 0, len(keys)) stream = self._vf.get_record_stream(keys, "topological", True) for _idx, record in enumerate(stream): if pb is not None: pb.update("extracting", 0, len(keys)) if record.storage_kind == "absent": raise errors.RevisionNotPresent(record.key, self._vf) this_key = record.key lines = record.get_bytes_as("lines") num_lines = len(lines) self._text_cache[this_key] = lines yield this_key, lines, num_lines for key in ann_keys: lines = self._text_cache[key] num_lines = len(lines) yield key, lines, num_lines def _get_parent_annotations_and_matches(self, key, text, parent_key): """Get the list of annotations for the parent, and the matching lines. 
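        As an illustrative sketch (hypothetical lines, not a doctest), the
        matching_blocks value has the same shape as the result of
        difflib.SequenceMatcher.get_matching_blocks(), including the
        terminating (len(parent), len(text), 0) sentinel::

            from patiencediff import PatienceSequenceMatcher

            parent_lines = [b'one', b'two', b'three']
            lines = [b'one', b'changed', b'three']
            matcher = PatienceSequenceMatcher(None, parent_lines, lines)
            matcher.get_matching_blocks()
            # [(0, 0, 1), (2, 2, 1), (3, 3, 0)]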
:param text: The opaque value given by _get_needed_texts :param parent_key: The key for the parent text :return: (parent_annotations, matching_blocks) parent_annotations is a list as long as the number of lines in parent matching_blocks is a list of (parent_idx, text_idx, len) tuples indicating which lines match between the two texts """ parent_lines = self._text_cache[parent_key] parent_annotations = self._annotations_cache[parent_key] # PatienceSequenceMatcher should probably be part of Policy from patiencediff import PatienceSequenceMatcher matcher = PatienceSequenceMatcher(None, parent_lines, text) matching_blocks = matcher.get_matching_blocks() return parent_annotations, matching_blocks def _update_from_first_parent(self, key, annotations, lines, parent_key): """Reannotate this text relative to its first parent.""" (parent_annotations, matching_blocks) = ( self._get_parent_annotations_and_matches(key, lines, parent_key) ) for parent_idx, lines_idx, match_len in matching_blocks: # For all matching regions we copy across the parent annotations annotations[lines_idx : lines_idx + match_len] = parent_annotations[ parent_idx : parent_idx + match_len ] def _update_from_other_parents( self, key, annotations, lines, this_annotation, parent_key ): """Reannotate this text relative to a second (or more) parent.""" (parent_annotations, matching_blocks) = ( self._get_parent_annotations_and_matches(key, lines, parent_key) ) last_ann = None last_parent = None last_res = None # TODO: consider making all annotations unique and then using 'is' # everywhere. Current results claim that isn't any faster, # because of the time spent deduping # deduping also saves a bit of memory. For NEWS it saves ~1MB, # but that is out of 200-300MB for extracting everything, so a # fairly trivial amount for parent_idx, lines_idx, match_len in matching_blocks: # For lines which match this parent, we will now resolve whether # this parent wins over the current annotation ann_sub = annotations[lines_idx : lines_idx + match_len] par_sub = parent_annotations[parent_idx : parent_idx + match_len] if ann_sub == par_sub: continue for idx in range(match_len): ann = ann_sub[idx] par_ann = par_sub[idx] ann_idx = lines_idx + idx if ann == par_ann: # Nothing to change continue if ann == this_annotation: # Originally claimed 'this', but it was really in this # parent annotations[ann_idx] = par_ann continue # Resolve the fact that both sides have a different value for # last modified if ann == last_ann and par_ann == last_parent: annotations[ann_idx] = last_res else: new_ann = set(ann) new_ann.update(par_ann) new_ann = tuple(sorted(new_ann)) annotations[ann_idx] = new_ann last_ann = ann last_parent = par_ann last_res = new_ann def _record_annotation(self, key, parent_keys, annotations): self._annotations_cache[key] = annotations for parent_key in parent_keys: num = self._num_needed_children[parent_key] num -= 1 if num == 0: del self._text_cache[parent_key] del self._annotations_cache[parent_key] # Do we want to clean up _num_needed_children at this point as # well? 
self._num_needed_children[parent_key] = num def _annotate_one(self, key, text, num_lines): this_annotation = (key,) # Note: annotations will be mutated by calls to _update_from* annotations = [this_annotation] * num_lines parent_keys = self._parent_map[key] if parent_keys: self._update_from_first_parent(key, annotations, text, parent_keys[0]) for parent in parent_keys[1:]: self._update_from_other_parents( key, annotations, text, this_annotation, parent ) self._record_annotation(key, parent_keys, annotations) def add_special_text(self, key, parent_keys, text): """Add a specific text to the graph. This is used to add a text which is not otherwise present in the versioned file. (eg. a WorkingTree injecting 'current:' into the graph to annotate the edited content.) :param key: The key to use to request this text be annotated :param parent_keys: The parents of this text :param text: A string containing the content of the text """ self._parent_map[key] = parent_keys self._text_cache[key] = osutils.split_lines(text) self._heads_provider = None def annotate(self, key): """Return annotated fulltext for the given key. :param key: A tuple defining the text to annotate :return: ([annotations], [lines]) annotations is a list of tuples of keys, one for each line in lines each key is a possible source for the given line. lines the text of "key" as a list of lines """ with ui.ui_factory.nested_progress_bar() as pb: for text_key, text, num_lines in self._get_needed_texts(key, pb=pb): self._annotate_one(text_key, text, num_lines) try: annotations = self._annotations_cache[key] except KeyError as exc: raise errors.RevisionNotPresent(key, self._vf) from exc return annotations, self._text_cache[key] def _get_heads_provider(self): if self._heads_provider is None: self._heads_provider = _mod_graph.KnownGraph(self._parent_map) return self._heads_provider def _resolve_annotation_tie(self, the_heads, line, tiebreaker): if tiebreaker is None: head = sorted(the_heads)[0] else: # Backwards compatibility, break up the heads into pairs and # resolve the result next_head = iter(the_heads) head = next(next_head) for possible_head in next_head: annotated_lines = ((head, line), (possible_head, line)) head = tiebreaker(annotated_lines)[0] return head def annotate_flat(self, key): """Determine the single-best-revision to source for each line. This is meant as a compatibility thunk to how annotate() used to work. :return: [(ann_key, line)] A list of tuples with a single annotation key for each line. """ from .annotate import _break_annotation_tie custom_tiebreaker = _break_annotation_tie annotations, lines = self.annotate(key) out = [] heads = self._get_heads_provider().heads append = out.append for annotation, line in zip(annotations, lines): if len(annotation) == 1: head = annotation[0] else: the_heads = heads(annotation) if len(the_heads) == 1: for head in the_heads: # noqa: B007 break # get the item out of the set else: head = self._resolve_annotation_tie( the_heads, line, custom_tiebreaker ) append((head, line)) return out breezy-3.3.11/breezy/_annotator_pyx.pyx000066400000000000000000000252721477433716000202310ustar00rootroot00000000000000# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Functionality for doing annotations in the 'optimal' way""" cdef extern from "python-compat.h": pass from cpython.dict cimport PyDict_GetItem, PyDict_SetItem from cpython.list cimport (PyList_Append, PyList_CheckExact, PyList_GET_ITEM, PyList_GET_SIZE, PyList_SetItem, PyList_Sort) from cpython.object cimport Py_EQ, Py_LT, PyObject, PyObject_RichCompareBool from cpython.ref cimport Py_INCREF from cpython.tuple cimport (PyTuple_CheckExact, PyTuple_GET_ITEM, PyTuple_GET_SIZE, PyTuple_New, PyTuple_SET_ITEM) cdef extern from "Python.h": void PyTuple_SET_ITEM_ptr "PyTuple_SET_ITEM" (object, Py_ssize_t, PyObject *) void Py_INCREF_ptr "Py_INCREF" (PyObject *) void Py_DECREF_ptr "Py_DECREF" (PyObject *) int PyObject_RichCompareBool_ptr "PyObject_RichCompareBool" ( PyObject *, PyObject *, int opid) from . import _annotator_py cdef int _check_match_ranges(list parent_annotations, list annotations, Py_ssize_t parent_idx, Py_ssize_t lines_idx, Py_ssize_t match_len) except -1: if parent_idx + match_len > len(parent_annotations): raise ValueError('Match length exceeds len of' ' parent_annotations %s > %s' % (parent_idx + match_len, len(parent_annotations))) if lines_idx + match_len > len(annotations): raise ValueError('Match length exceeds len of' ' annotations %s > %s' % (lines_idx + match_len, len(annotations))) return 0 cdef PyObject *_next_tuple_entry(object tpl, Py_ssize_t *pos): # cannot_raise """Return the next entry from this tuple. :param tpl: The tuple we are investigating, *must* be a PyTuple :param pos: The last item we found. Will be updated to the new position. This cannot raise an exception, as it does no error checking. """ pos[0] = pos[0] + 1 if pos[0] >= PyTuple_GET_SIZE(tpl): return NULL return PyTuple_GET_ITEM(tpl, pos[0]) cdef object _combine_annotations(ann_one, ann_two, cache): """Combine the annotations from both sides.""" cdef Py_ssize_t pos_one, pos_two, len_one, len_two cdef Py_ssize_t out_pos cdef PyObject *temp cdef PyObject *left cdef PyObject *right if (PyObject_RichCompareBool(ann_one, ann_two, Py_LT)): cache_key = (ann_one, ann_two) else: cache_key = (ann_two, ann_one) temp = PyDict_GetItem(cache, cache_key) if temp != NULL: return temp if not PyTuple_CheckExact(ann_one) or not PyTuple_CheckExact(ann_two): raise TypeError('annotations must be tuples') # We know that annotations are tuples, and that both sides are already # sorted, so we can just walk and update a new list. pos_one = -1 pos_two = -1 out_pos = 0 left = _next_tuple_entry(ann_one, &pos_one) right = _next_tuple_entry(ann_two, &pos_two) new_ann = PyTuple_New(PyTuple_GET_SIZE(ann_one) + PyTuple_GET_SIZE(ann_two)) while left != NULL and right != NULL: # left == right is done by PyObject_RichCompareBool_ptr, however it # avoids a function call for a very common case. Drops 'time bzr # annotate NEWS' from 7.25s to 7.16s, so it *is* a visible impact. 
if (left == right or PyObject_RichCompareBool_ptr(left, right, Py_EQ)): # Identical values, step both Py_INCREF_ptr(left) PyTuple_SET_ITEM_ptr(new_ann, out_pos, left) left = _next_tuple_entry(ann_one, &pos_one) right = _next_tuple_entry(ann_two, &pos_two) elif (PyObject_RichCompareBool_ptr(left, right, Py_LT)): # left < right or right == NULL Py_INCREF_ptr(left) PyTuple_SET_ITEM_ptr(new_ann, out_pos, left) left = _next_tuple_entry(ann_one, &pos_one) else: # right < left or left == NULL Py_INCREF_ptr(right) PyTuple_SET_ITEM_ptr(new_ann, out_pos, right) right = _next_tuple_entry(ann_two, &pos_two) out_pos = out_pos + 1 while left != NULL: Py_INCREF_ptr(left) PyTuple_SET_ITEM_ptr(new_ann, out_pos, left) left = _next_tuple_entry(ann_one, &pos_one) out_pos = out_pos + 1 while right != NULL: Py_INCREF_ptr(right) PyTuple_SET_ITEM_ptr(new_ann, out_pos, right) right = _next_tuple_entry(ann_two, &pos_two) out_pos = out_pos + 1 if out_pos != PyTuple_GET_SIZE(new_ann): # Timing _PyTuple_Resize was not significantly faster that slicing # PyTuple_Resize((new_ann), out_pos) new_ann = new_ann[0:out_pos] PyDict_SetItem(cache, cache_key, new_ann) return new_ann cdef int _apply_parent_annotations(list annotations, list parent_annotations, matching_blocks) except -1: """Apply the annotations from parent_annotations into annotations. matching_blocks defines the ranges that match. """ cdef Py_ssize_t parent_idx, lines_idx, match_len, idx cdef PyObject *par_temp cdef PyObject *ann_temp # For NEWS and breezy/builtins.py, over 99% of the lines are simply copied # across from the parent entry. So this routine is heavily optimized for # that. Would be interesting if we could use memcpy() but we have to incref # and decref for parent_idx, lines_idx, match_len in matching_blocks: _check_match_ranges(parent_annotations, annotations, parent_idx, lines_idx, match_len) annotations[lines_idx:lines_idx + match_len] = parent_annotations[parent_idx:parent_idx + match_len] return 0 cdef int _merge_annotations(object this_annotation, list annotations, list parent_annotations, matching_blocks, ann_cache) except -1: cdef Py_ssize_t parent_idx, ann_idx, lines_idx, match_len, idx cdef Py_ssize_t pos cdef object ann cdef object par last_ann = None last_parent = None last_res = None for parent_idx, lines_idx, match_len in matching_blocks: _check_match_ranges(parent_annotations, annotations, parent_idx, lines_idx, match_len) # For lines which match this parent, we will now resolve whether # this parent wins over the current annotation for idx from 0 <= idx < match_len: ann_idx = lines_idx + idx ann = annotations[ann_idx] par_ann = parent_annotations[parent_idx + idx] if (ann == par_ann): # This is parent, do nothing # Pointer comparison is fine here. 
Value comparison would # be ok, but it will be handled in the final if clause by # merging the two tuples into the same tuple # Avoiding the Py_INCREF and function call to # PyObject_RichCompareBool using pointer comparison drops # timing from 215ms => 125ms continue if (ann is this_annotation): # Originally claimed 'this', but it was really in this # parent annotations[ann_idx] = par_ann continue # Resolve the fact that both sides have a different value for # last modified if (ann is last_ann and par_ann is last_parent): annotations[ann_idx] = last_res else: new_ann = _combine_annotations(ann, par_ann, ann_cache) annotations[ann_idx] = new_ann last_ann = ann last_parent = par_ann last_res = new_ann return 0 class Annotator(_annotator_py.Annotator): """Class that drives performing annotations.""" def _update_from_first_parent(self, key, annotations, lines, parent_key): """Reannotate this text relative to its first parent.""" (parent_annotations, matching_blocks) = self._get_parent_annotations_and_matches( key, lines, parent_key) _apply_parent_annotations(annotations, parent_annotations, matching_blocks) def _update_from_other_parents(self, key, annotations, lines, this_annotation, parent_key): """Reannotate this text relative to a second (or more) parent.""" (parent_annotations, matching_blocks) = self._get_parent_annotations_and_matches( key, lines, parent_key) _merge_annotations(this_annotation, annotations, parent_annotations, matching_blocks, self._ann_tuple_cache) def annotate_flat(self, key): """Determine the single-best-revision to source for each line. This is meant as a compatibility thunk to how annotate() used to work. """ cdef Py_ssize_t pos, num_lines from . import annotate custom_tiebreaker = annotate._break_annotation_tie annotations, lines = self.annotate(key) num_lines = len(lines) out = [] heads = self._get_heads_provider().heads for pos from 0 <= pos < num_lines: annotation = annotations[pos] line = lines[pos] if len(annotation) == 1: head = annotation[0] else: the_heads = heads(annotation) if len(the_heads) == 1: for head in the_heads: break # get the item out of the set else: # We need to resolve the ambiguity, for now just pick the # sorted smallest head = self._resolve_annotation_tie(the_heads, line, custom_tiebreaker) PyList_Append(out, (head, line)) return out breezy-3.3.11/breezy/_chunks_to_lines_py.py000066400000000000000000000044611477433716000210300ustar00rootroot00000000000000# Copyright (C) 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """The python implementation of chunks_to_lines.""" def chunks_to_lines(chunks): """Re-split chunks into simple lines. Each entry in the result should contain a single newline at the end. Except for the last entry which may not have a final newline. If chunks is already a simple list of lines, we return it directly. :param chunks: An list/tuple of strings. 
If chunks is already a list of lines, then we will return it as-is. :return: A list of strings. """ # Optimize for a very common case when chunks are already lines last_no_newline = False for chunk in chunks: if last_no_newline: # Only the last chunk is allowed to not have a trailing newline # Getting here means the last chunk didn't have a newline, and we # have a chunk following it break if not chunk: # Empty strings are never valid lines break elif b"\n" in chunk[:-1]: # This chunk has an extra '\n', so we will have to split it break elif chunk[-1:] != b"\n": # This chunk does not have a trailing newline last_no_newline = True else: # All of the lines (but possibly the last) have a single newline at the # end of the string. # For the last one, we allow it to not have a trailing newline, but it # is not allowed to be an empty string. return chunks # These aren't simple lines, just join and split again. from breezy import osutils return osutils._split_lines(b"".join(chunks)) breezy-3.3.11/breezy/_chunks_to_lines_pyx.pyx000066400000000000000000000106361477433716000214110ustar00rootroot00000000000000# Copyright (C) 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Pyrex extensions for converting chunks to lines.""" cdef extern from "python-compat.h": pass from cpython.bytes cimport (PyBytes_AS_STRING, PyBytes_CheckExact, PyBytes_FromStringAndSize, PyBytes_GET_SIZE) from cpython.list cimport PyList_Append from libc.string cimport memchr def chunks_to_lines(chunks): """Re-split chunks into simple lines. Each entry in the result should contain a single newline at the end. Except for the last entry which may not have a final newline. If chunks is already a simple list of lines, we return it directly. :param chunks: An list/tuple of strings. If chunks is already a list of lines, then we will return it as-is. :return: A list of strings. """ cdef char *c_str cdef char *newline cdef char *c_last cdef Py_ssize_t the_len cdef int last_no_newline # Check to see if the chunks are already lines last_no_newline = 0 for chunk in chunks: if last_no_newline: # We have a chunk which followed a chunk without a newline, so this # is not a simple list of lines. break # Switching from PyBytes_AsStringAndSize to PyBytes_CheckExact and # then the macros GET_SIZE and AS_STRING saved us 40us / 470us. # It seems PyBytes_AsStringAndSize can actually trigger a conversion, # which we don't want anyway. if not PyBytes_CheckExact(chunk): raise TypeError('chunk is not a string') the_len = PyBytes_GET_SIZE(chunk) if the_len == 0: # An empty string is never a valid line break c_str = PyBytes_AS_STRING(chunk) c_last = c_str + the_len - 1 newline = memchr(c_str, c'\n', the_len) if newline != c_last: if newline == NULL: # Missing a newline. 
Only valid as the last line last_no_newline = 1 else: # There is a newline in the middle, we must resplit break else: # Everything was already a list of lines return chunks # We know we need to create a new list of lines lines = [] tail = None # Any remainder from the previous chunk for chunk in chunks: if tail is not None: chunk = tail + chunk tail = None if not PyBytes_CheckExact(chunk): raise TypeError('chunk is not a string') the_len = PyBytes_GET_SIZE(chunk) if the_len == 0: # An empty string is never a valid line, and we don't need to # append anything continue c_str = PyBytes_AS_STRING(chunk) c_last = c_str + the_len - 1 newline = memchr(c_str, c'\n', the_len) if newline == c_last: # A simple line PyList_Append(lines, chunk) elif newline == NULL: # A chunk without a newline, if this is the last entry, then we # allow it tail = chunk else: # We have a newline in the middle, loop until we've consumed all # lines while newline != NULL: line = PyBytes_FromStringAndSize(c_str, newline - c_str + 1) PyList_Append(lines, line) c_str = newline + 1 if c_str > c_last: # We are done break the_len = c_last - c_str + 1 newline = memchr(c_str, c'\n', the_len) if newline == NULL: tail = PyBytes_FromStringAndSize(c_str, the_len) break if tail is not None: PyList_Append(lines, tail) return lines breezy-3.3.11/breezy/_known_graph_py.py000066400000000000000000000341501477433716000201540ustar00rootroot00000000000000# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Implementation of Graph algorithms when we have already loaded everything.""" from collections import deque from . import errors, revision class _KnownGraphNode: """Represents a single object in the known graph.""" __slots__ = ("child_keys", "gdfo", "key", "parent_keys") def __init__(self, key, parent_keys): self.key = key self.parent_keys = parent_keys self.child_keys = [] # Greatest distance from origin self.gdfo = None def __repr__(self): return "{}({} gdfo:{} par:{} child:{})".format( self.__class__.__name__, self.key, self.gdfo, self.parent_keys, self.child_keys, ) class _MergeSortNode: """Information about a specific node in the merge graph.""" __slots__ = ("end_of_merge", "key", "merge_depth", "revno") def __init__(self, key, merge_depth, revno, end_of_merge): self.key = key self.merge_depth = merge_depth self.revno = revno self.end_of_merge = end_of_merge class KnownGraph: """This is a class which assumes we already know the full graph.""" def __init__(self, parent_map, do_cache=True): """Create a new KnownGraph instance. :param parent_map: A dictionary mapping key => parent_keys """ self._nodes = {} # Maps {frozenset(revision_id, revision_id): heads} self._known_heads = {} self.do_cache = do_cache self._initialize_nodes(parent_map) self._find_gdfo() def _initialize_nodes(self, parent_map): """Populate self._nodes. 
After this has finished: - self._nodes will have an entry for every entry in parent_map. - ghosts will have a parent_keys = None, - all nodes found will also have .child_keys populated with all known child_keys, """ nodes = self._nodes for key, parent_keys in parent_map.items(): if key in nodes: node = nodes[key] node.parent_keys = parent_keys else: node = _KnownGraphNode(key, parent_keys) nodes[key] = node for parent_key in parent_keys: try: parent_node = nodes[parent_key] except KeyError: parent_node = _KnownGraphNode(parent_key, None) nodes[parent_key] = parent_node parent_node.child_keys.append(key) def _find_tails(self): return [node for node in self._nodes.values() if not node.parent_keys] def _find_tips(self): return [node for node in self._nodes.values() if not node.child_keys] def _find_gdfo(self): nodes = self._nodes known_parent_gdfos = {} pending = [] for node in self._find_tails(): node.gdfo = 1 pending.append(node) while pending: node = pending.pop() for child_key in node.child_keys: child = nodes[child_key] if child_key in known_parent_gdfos: known_gdfo = known_parent_gdfos[child_key] + 1 present = True else: known_gdfo = 1 present = False if child.gdfo is None or node.gdfo + 1 > child.gdfo: child.gdfo = node.gdfo + 1 if known_gdfo == len(child.parent_keys): # We are the last parent updating that node, we can # continue from there pending.append(child) if present: del known_parent_gdfos[child_key] else: # Update known_parent_gdfos for a key we couldn't process known_parent_gdfos[child_key] = known_gdfo def add_node(self, key, parent_keys): """Add a new node to the graph. If this fills in a ghost, then the gdfos of all children will be updated accordingly. :param key: The node being added. If this is a duplicate, this is a no-op. :param parent_keys: The parents of the given node. :return: None (should we return if this was a ghost, etc?) """ nodes = self._nodes if key in nodes: node = nodes[key] if node.parent_keys is None: node.parent_keys = parent_keys # A ghost is being added, we can no-longer trust the heads # cache, so clear it self._known_heads.clear() else: # Make sure we compare a list to a list, as tuple != list. parent_keys = list(parent_keys) existing_parent_keys = list(node.parent_keys) if parent_keys == existing_parent_keys: return # Identical content else: raise ValueError( "Parent key mismatch, existing node {}" " has parents of {} not {}".format( key, existing_parent_keys, parent_keys ) ) else: node = _KnownGraphNode(key, parent_keys) nodes[key] = node parent_gdfo = 0 for parent_key in parent_keys: try: parent_node = nodes[parent_key] except KeyError: parent_node = _KnownGraphNode(parent_key, None) # Ghosts and roots have gdfo 1 parent_node.gdfo = 1 nodes[parent_key] = parent_node if parent_gdfo < parent_node.gdfo: parent_gdfo = parent_node.gdfo parent_node.child_keys.append(key) node.gdfo = parent_gdfo + 1 # Now fill the gdfo to all children # Note that this loop is slightly inefficient, in that we may visit the # same child (and its decendents) more than once, however, it is # 'efficient' in that we only walk to nodes that would be updated, # rather than all nodes # We use a deque rather than a simple list stack, to go for BFD rather # than DFD. 
So that if a longer path is possible, we walk it before we # get to the final child pending = deque([node]) while pending: node = pending.popleft() next_gdfo = node.gdfo + 1 for child_key in node.child_keys: child = nodes[child_key] if child.gdfo < next_gdfo: # This child is being updated, we need to check its # children child.gdfo = next_gdfo pending.append(child) def heads(self, keys): """Return the heads from amongst keys. This is done by searching the ancestries of each key. Any key that is reachable from another key is not returned; all the others are. This operation scales with the relative depth between any two keys. It uses gdfo to avoid walking all ancestry. :param keys: An iterable of keys. :return: A set of the heads. Note that as a set there is no ordering information. Callers will need to filter their input to create order if they need it. """ candidate_nodes = {key: self._nodes[key] for key in keys} if revision.NULL_REVISION in candidate_nodes: # NULL_REVISION is only a head if it is the only entry candidate_nodes.pop(revision.NULL_REVISION) if not candidate_nodes: return frozenset([revision.NULL_REVISION]) if len(candidate_nodes) < 2: # No or only one candidate return frozenset(candidate_nodes) heads_key = frozenset(candidate_nodes) # Do we have a cached result ? try: heads = self._known_heads[heads_key] return heads except KeyError: pass # Let's compute the heads seen = set() pending = [] min_gdfo = None for node in candidate_nodes.values(): if node.parent_keys: pending.extend(node.parent_keys) if min_gdfo is None or node.gdfo < min_gdfo: min_gdfo = node.gdfo nodes = self._nodes while pending: node_key = pending.pop() if node_key in seen: # node already appears in some ancestry continue seen.add(node_key) node = nodes[node_key] if node.gdfo <= min_gdfo: continue if node.parent_keys: pending.extend(node.parent_keys) heads = heads_key.difference(seen) if self.do_cache: self._known_heads[heads_key] = heads return heads def topo_sort(self): """Return the nodes in topological order. All parents must occur before all children. """ for node in self._nodes.values(): if node.gdfo is None: raise errors.GraphCycleError(self._nodes) pending = self._find_tails() pending_pop = pending.pop pending_append = pending.append topo_order = [] topo_order_append = topo_order.append num_seen_parents = dict.fromkeys(self._nodes, 0) while pending: node = pending_pop() if node.parent_keys is not None: # We don't include ghost parents topo_order_append(node.key) for child_key in node.child_keys: child_node = self._nodes[child_key] seen_parents = num_seen_parents[child_key] + 1 if seen_parents == len(child_node.parent_keys): # All parents have been processed, enqueue this child pending_append(child_node) # This has been queued up, stop tracking it del num_seen_parents[child_key] else: num_seen_parents[child_key] = seen_parents # We started from the parents, so we don't need to do anymore work return topo_order def gc_sort(self): """Return a reverse topological ordering which is 'stable'. There are a few constraints: 1) Reverse topological (all children before all parents) 2) Grouped by prefix 3) 'stable' sorting, so that we get the same result, independent of machine, or extra data. To do this, we use the same basic algorithm as topo_sort, but when we aren't sure what node to access next, we sort them lexicographically. 
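        A minimal illustrative sketch (hypothetical keys, not a doctest)::

            graph = KnownGraph({b'A': (), b'B': (b'A',), b'C': (b'A',)})
            graph.gc_sort()
            # [b'B', b'C', b'A']: both children come before the parent, and
            # the tie between b'B' and b'C' is broken lexicographically.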
""" tips = self._find_tips() # Split the tips based on prefix prefix_tips = {} for node in tips: if node.key.__class__ is str or len(node.key) == 1: prefix = "" else: prefix = node.key[0] prefix_tips.setdefault(prefix, []).append(node) num_seen_children = dict.fromkeys(self._nodes, 0) result = [] for prefix in sorted(prefix_tips): pending = sorted(prefix_tips[prefix], key=lambda n: n.key, reverse=True) while pending: node = pending.pop() if node.parent_keys is None: # Ghost node, skip it continue result.append(node.key) for parent_key in sorted(node.parent_keys, reverse=True): parent_node = self._nodes[parent_key] seen_children = num_seen_children[parent_key] + 1 if seen_children == len(parent_node.child_keys): # All children have been processed, enqueue this parent pending.append(parent_node) # This has been queued up, stop tracking it del num_seen_children[parent_key] else: num_seen_children[parent_key] = seen_children return result def merge_sort(self, tip_key): """Compute the merge sorted graph output.""" from breezy import tsort as_parent_map = { node.key: node.parent_keys for node in self._nodes.values() if node.parent_keys is not None } # We intentionally always generate revnos and never force the # mainline_revisions # Strip the sequence_number that merge_sort generates return [ _MergeSortNode(key, merge_depth, revno, end_of_merge) for _, key, merge_depth, revno, end_of_merge in tsort.merge_sort( as_parent_map, tip_key, mainline_revisions=None, generate_revno=True ) ] def get_parent_keys(self, key): """Get the parents for a key. Returns a list containg the parents keys. If the key is a ghost, None is returned. A KeyError will be raised if the key is not in the graph. :param keys: Key to check (eg revision_id) :return: A list of parents """ return self._nodes[key].parent_keys def get_child_keys(self, key): """Get the children for a key. Returns a list containg the children keys. A KeyError will be raised if the key is not in the graph. :param keys: Key to check (eg revision_id) :return: A list of children """ return self._nodes[key].child_keys breezy-3.3.11/breezy/_known_graph_pyx.pyx000066400000000000000000001116021477433716000205320ustar00rootroot00000000000000# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Implementation of Graph algorithms when we have already loaded everything. 
""" cdef extern from "python-compat.h": pass from cpython.bytes cimport PyBytes_CheckExact from cpython.dict cimport (PyDict_CheckExact, PyDict_DelItem, PyDict_GetItem, PyDict_Next, PyDict_SetItem, PyDict_Size) from cpython.list cimport (PyList_Append, PyList_CheckExact, PyList_GET_ITEM, PyList_GET_SIZE, PyList_SetItem) from cpython.object cimport Py_LT, PyObject, PyObject_RichCompareBool from cpython.ref cimport Py_INCREF from cpython.tuple cimport (PyTuple_CheckExact, PyTuple_GET_ITEM, PyTuple_GET_SIZE, PyTuple_New, PyTuple_SET_ITEM) import collections import gc from . import errors, revision cdef object NULL_REVISION NULL_REVISION = revision.NULL_REVISION cdef class _KnownGraphNode: """Represents a single object in the known graph.""" cdef object key cdef object parents cdef object children cdef public long gdfo cdef int seen cdef object extra def __init__(self, key): self.key = key self.parents = None self.children = [] # Greatest distance from origin self.gdfo = -1 self.seen = 0 self.extra = None property child_keys: def __get__(self): cdef _KnownGraphNode child keys = [] for child in self.children: PyList_Append(keys, child.key) return keys property parent_keys: def __get__(self): if self.parents is None: return None cdef _KnownGraphNode parent keys = [] for parent in self.parents: PyList_Append(keys, parent.key) return keys cdef clear_references(self): self.parents = None self.children = None def __repr__(self): cdef _KnownGraphNode node parent_keys = [] if self.parents is not None: for node in self.parents: parent_keys.append(node.key) child_keys = [] if self.children is not None: for node in self.children: child_keys.append(node.key) return '%s(%s gdfo:%s par:%s child:%s)' % ( self.__class__.__name__, self.key, self.gdfo, parent_keys, child_keys) cdef _KnownGraphNode _get_list_node(lst, Py_ssize_t pos): cdef PyObject *temp_node temp_node = PyList_GET_ITEM(lst, pos) return <_KnownGraphNode>temp_node cdef _KnownGraphNode _get_tuple_node(tpl, Py_ssize_t pos): cdef PyObject *temp_node temp_node = PyTuple_GET_ITEM(tpl, pos) return <_KnownGraphNode>temp_node def get_key(node): cdef _KnownGraphNode real_node real_node = node return real_node.key cdef object _sort_list_nodes(object lst_or_tpl, int reverse): """Sort a list of _KnownGraphNode objects. If lst_or_tpl is a list, it is allowed to mutate in place. It may also just return the input list if everything is already sorted. 
""" cdef _KnownGraphNode node1, node2 cdef int do_swap, is_tuple cdef Py_ssize_t length is_tuple = PyTuple_CheckExact(lst_or_tpl) if not (is_tuple or PyList_CheckExact(lst_or_tpl)): raise TypeError('lst_or_tpl must be a list or tuple.') length = len(lst_or_tpl) if length == 0 or length == 1: return lst_or_tpl if length == 2: if is_tuple: node1 = _get_tuple_node(lst_or_tpl, 0) node2 = _get_tuple_node(lst_or_tpl, 1) else: node1 = _get_list_node(lst_or_tpl, 0) node2 = _get_list_node(lst_or_tpl, 1) if reverse: do_swap = PyObject_RichCompareBool(node1.key, node2.key, Py_LT) else: do_swap = PyObject_RichCompareBool(node2.key, node1.key, Py_LT) if not do_swap: return lst_or_tpl if is_tuple: return (node2, node1) else: # Swap 'in-place', since lists are mutable Py_INCREF(node1) PyList_SetItem(lst_or_tpl, 1, node1) Py_INCREF(node2) PyList_SetItem(lst_or_tpl, 0, node2) return lst_or_tpl # For all other sizes, we just use 'sorted()' if is_tuple: # Note that sorted() is just list(iterable).sort() lst_or_tpl = list(lst_or_tpl) lst_or_tpl.sort(key=get_key, reverse=reverse) return lst_or_tpl cdef class _MergeSorter cdef class KnownGraph: """This is a class which assumes we already know the full graph.""" cdef public object _nodes cdef public object _known_heads cdef public int do_cache def __init__(self, parent_map, do_cache=True): """Create a new KnownGraph instance. :param parent_map: A dictionary mapping key => parent_keys """ # tests at pre-allocating the node dict actually slowed things down self._nodes = {} # Maps {sorted(revision_id, revision_id): heads} self._known_heads = {} self.do_cache = int(do_cache) # TODO: consider disabling gc since we are allocating a lot of nodes # that won't be collectable anyway. real world testing has not # shown a specific impact, yet. self._initialize_nodes(parent_map) self._find_gdfo() def __dealloc__(self): cdef _KnownGraphNode child cdef Py_ssize_t pos cdef PyObject *temp_node while PyDict_Next(self._nodes, &pos, NULL, &temp_node): child = <_KnownGraphNode>temp_node child.clear_references() cdef _KnownGraphNode _get_or_create_node(self, key): cdef PyObject *temp_node cdef _KnownGraphNode node temp_node = PyDict_GetItem(self._nodes, key) if temp_node == NULL: node = _KnownGraphNode(key) PyDict_SetItem(self._nodes, key, node) else: node = <_KnownGraphNode>temp_node return node cdef _populate_parents(self, _KnownGraphNode node, parent_keys): cdef Py_ssize_t num_parent_keys, pos cdef _KnownGraphNode parent_node num_parent_keys = len(parent_keys) # We know how many parents, so we pre allocate the tuple parent_nodes = PyTuple_New(num_parent_keys) for pos from 0 <= pos < num_parent_keys: # Note: it costs us 10ms out of 40ms to lookup all of these # parents, it doesn't seem to be an allocation overhead, # but rather a lookup overhead. There doesn't seem to be # a way around it, and that is one reason why # KnownGraphNode maintains a direct pointer to the parent # node. # We use [] because parent_keys may be a tuple or list parent_node = self._get_or_create_node(parent_keys[pos]) # PyTuple_SET_ITEM will steal a reference, so INCREF first Py_INCREF(parent_node) PyTuple_SET_ITEM(parent_nodes, pos, parent_node) PyList_Append(parent_node.children, node) node.parents = parent_nodes def _initialize_nodes(self, parent_map): """Populate self._nodes. After this has finished: - self._nodes will have an entry for every entry in parent_map. 
- ghosts will have a parent_keys = None, - all nodes found will also have child_keys populated with all known child keys, """ cdef PyObject *temp_key cdef PyObject *temp_parent_keys cdef PyObject *temp_node cdef Py_ssize_t pos cdef _KnownGraphNode node cdef _KnownGraphNode parent_node if not PyDict_CheckExact(parent_map): raise TypeError('parent_map should be a dict of {key:parent_keys}') # for key, parent_keys in parent_map.iteritems(): pos = 0 while PyDict_Next(parent_map, &pos, &temp_key, &temp_parent_keys): key = temp_key parent_keys = temp_parent_keys node = self._get_or_create_node(key) self._populate_parents(node, parent_keys) def _find_tails(self): cdef PyObject *temp_node cdef _KnownGraphNode node cdef Py_ssize_t pos tails = [] pos = 0 while PyDict_Next(self._nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if node.parents is None or PyTuple_GET_SIZE(node.parents) == 0: node.gdfo = 1 PyList_Append(tails, node) return tails def _find_tips(self): cdef PyObject *temp_node cdef _KnownGraphNode node cdef Py_ssize_t pos tips = [] pos = 0 while PyDict_Next(self._nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if PyList_GET_SIZE(node.children) == 0: PyList_Append(tips, node) return tips def _find_gdfo(self): cdef _KnownGraphNode node cdef _KnownGraphNode child cdef PyObject *temp cdef Py_ssize_t pos cdef int replace cdef Py_ssize_t last_item cdef long next_gdfo pending = self._find_tails() last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: # Avoid pop followed by push, instead, peek, and replace # timing shows this is 930ms => 770ms for OOo node = _get_list_node(pending, last_item) last_item = last_item - 1 next_gdfo = node.gdfo + 1 for pos from 0 <= pos < PyList_GET_SIZE(node.children): child = _get_list_node(node.children, pos) if next_gdfo > child.gdfo: child.gdfo = next_gdfo child.seen = child.seen + 1 if child.seen == PyTuple_GET_SIZE(child.parents): # This child is populated, queue it to be walked last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(child) # SetItem steals a ref PyList_SetItem(pending, last_item, child) else: PyList_Append(pending, child) # We have queued this node, we don't need to track it # anymore child.seen = 0 def add_node(self, key, parent_keys): """Add a new node to the graph. If this fills in a ghost, then the gdfos of all children will be updated accordingly. :param key: The node being added. If this is a duplicate, this is a no-op. :param parent_keys: The parents of the given node. :return: None (should we return if this was a ghost, etc?) 
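        Illustrative sketch (hypothetical keys, not a doctest)::

            graph = KnownGraph({b'A': (), b'B': (b'A',)})
            graph.add_node(b'C', [b'B'])
            graph.get_parent_keys(b'C')  # [b'B']
            graph.topo_sort()            # [b'A', b'B', b'C']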
""" cdef PyObject *maybe_node cdef _KnownGraphNode node, parent_node, child_node cdef long parent_gdfo, next_gdfo maybe_node = PyDict_GetItem(self._nodes, key) if maybe_node != NULL: node = <_KnownGraphNode>maybe_node if node.parents is None: # We are filling in a ghost self._populate_parents(node, parent_keys) # We can't trust cached heads anymore self._known_heads.clear() else: # Ensure that the parent_key list matches existing_parent_keys = [] for parent_node in node.parents: existing_parent_keys.append(parent_node.key) # Make sure we use a list for the comparison, in case it was a # tuple, etc parent_keys = list(parent_keys) if existing_parent_keys == parent_keys: # Exact match, nothing more to do return else: raise ValueError('Parent key mismatch, existing node %s' ' has parents of %s not %s' % (key, existing_parent_keys, parent_keys)) else: node = _KnownGraphNode(key) PyDict_SetItem(self._nodes, key, node) self._populate_parents(node, parent_keys) parent_gdfo = 0 for parent_node in node.parents: if parent_node.gdfo == -1: # This is a newly introduced ghost, so it gets gdfo of 1 parent_node.gdfo = 1 if parent_gdfo < parent_node.gdfo: parent_gdfo = parent_node.gdfo node.gdfo = parent_gdfo + 1 # Now fill the gdfo to all children # Note that this loop is slightly inefficient, in that we may visit the # same child (and its decendents) more than once, however, it is # 'efficient' in that we only walk to nodes that would be updated, # rather than all nodes # We use a deque rather than a simple list stack, to go for BFD rather # than DFD. So that if a longer path is possible, we walk it before we # get to the final child pending = collections.deque([node]) pending_popleft = pending.popleft pending_append = pending.append while pending: node = pending_popleft() next_gdfo = node.gdfo + 1 for child_node in node.children: if child_node.gdfo < next_gdfo: # This child is being updated, we need to check its # children child_node.gdfo = next_gdfo pending_append(child_node) def heads(self, keys): """Return the heads from amongst keys. This is done by searching the ancestries of each key. Any key that is reachable from another key is not returned; all the others are. This operation scales with the relative depth between any two keys. It uses gdfo to avoid walking all ancestry. :param keys: An iterable of keys. :return: A set of the heads. Note that as a set there is no ordering information. Callers will need to filter their input to create order if they need it. 
""" cdef PyObject *maybe_node cdef PyObject *maybe_heads cdef PyObject *temp_node cdef _KnownGraphNode node cdef Py_ssize_t pos, last_item cdef long min_gdfo heads_key = frozenset(keys) maybe_heads = PyDict_GetItem(self._known_heads, heads_key) if maybe_heads != NULL: return maybe_heads # Not cached, compute it ourselves candidate_nodes = {} for key in keys: maybe_node = PyDict_GetItem(self._nodes, key) if maybe_node == NULL: raise KeyError('key %s not in nodes' % (key,)) PyDict_SetItem(candidate_nodes, key, maybe_node) maybe_node = PyDict_GetItem(candidate_nodes, NULL_REVISION) if maybe_node != NULL: # NULL_REVISION is only a head if it is the only entry candidate_nodes.pop(NULL_REVISION) if not candidate_nodes: return frozenset([NULL_REVISION]) # The keys changed, so recalculate heads_key heads_key = frozenset(candidate_nodes) if PyDict_Size(candidate_nodes) < 2: return heads_key cleanup = [] pending = [] # we know a gdfo cannot be longer than a linear chain of all nodes min_gdfo = PyDict_Size(self._nodes) + 1 # Build up nodes that need to be walked, note that starting nodes are # not added to seen() pos = 0 while PyDict_Next(candidate_nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if node.parents is not None: pending.extend(node.parents) if node.gdfo < min_gdfo: min_gdfo = node.gdfo # Now do all the real work last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: node = _get_list_node(pending, last_item) last_item = last_item - 1 if node.seen: # node already appears in some ancestry continue PyList_Append(cleanup, node) node.seen = 1 if node.gdfo <= min_gdfo: continue if node.parents is not None and PyTuple_GET_SIZE(node.parents) > 0: for pos from 0 <= pos < PyTuple_GET_SIZE(node.parents): parent_node = _get_tuple_node(node.parents, pos) last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(parent_node) # SetItem steals a ref PyList_SetItem(pending, last_item, parent_node) else: PyList_Append(pending, parent_node) heads = [] pos = 0 while PyDict_Next(candidate_nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if not node.seen: PyList_Append(heads, node.key) heads = frozenset(heads) for pos from 0 <= pos < PyList_GET_SIZE(cleanup): node = _get_list_node(cleanup, pos) node.seen = 0 if self.do_cache: PyDict_SetItem(self._known_heads, heads_key, heads) return heads def topo_sort(self): """Return the nodes in topological order. All parents must occur before all children. """ # This is, for the most part, the same iteration order that we used for # _find_gdfo, consider finding a way to remove the duplication # In general, we find the 'tails' (nodes with no parents), and then # walk to the children. For children that have all of their parents # yielded, we queue up the child to be yielded as well. 
cdef _KnownGraphNode node cdef _KnownGraphNode child cdef PyObject *temp cdef Py_ssize_t pos cdef int replace cdef Py_ssize_t last_item pending = self._find_tails() if PyList_GET_SIZE(pending) == 0 and len(self._nodes) > 0: raise errors.GraphCycleError(self._nodes) topo_order = [] last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: # Avoid pop followed by push, instead, peek, and replace # timing shows this is 930ms => 770ms for OOo node = _get_list_node(pending, last_item) last_item = last_item - 1 if node.parents is not None: # We don't include ghost parents PyList_Append(topo_order, node.key) for pos from 0 <= pos < PyList_GET_SIZE(node.children): child = _get_list_node(node.children, pos) if child.gdfo == -1: # We know we have a graph cycle because a node has a parent # which we couldn't find raise errors.GraphCycleError(self._nodes) child.seen = child.seen + 1 if child.seen == PyTuple_GET_SIZE(child.parents): # All parents of this child have been yielded, queue this # one to be yielded as well last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(child) # SetItem steals a ref PyList_SetItem(pending, last_item, child) else: PyList_Append(pending, child) # We have queued this node, we don't need to track it # anymore child.seen = 0 # We started from the parents, so we don't need to do any more work return topo_order def gc_sort(self): """Return a reverse topological ordering which is 'stable'. There are a few constraints: 1) Reverse topological (all children before all parents) 2) Grouped by prefix 3) 'stable' sorting, so that we get the same result, independent of machine, or extra data. To do this, we use the same basic algorithm as topo_sort, but when we aren't sure what node to access next, we sort them lexicographically. """ cdef PyObject *temp cdef Py_ssize_t pos, last_item cdef _KnownGraphNode node, node2, parent_node tips = self._find_tips() # Split the tips based on prefix prefix_tips = {} for pos from 0 <= pos < PyList_GET_SIZE(tips): node = _get_list_node(tips, pos) if PyBytes_CheckExact(node.key) or len(node.key) == 1: prefix = '' else: prefix = node.key[0] temp = PyDict_GetItem(prefix_tips, prefix) if temp == NULL: prefix_tips[prefix] = [node] else: tip_nodes = <object>temp PyList_Append(tip_nodes, node) result = [] for prefix in sorted(prefix_tips): temp = PyDict_GetItem(prefix_tips, prefix) assert temp != NULL tip_nodes = <object>temp pending = _sort_list_nodes(tip_nodes, 1) last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: node = _get_list_node(pending, last_item) last_item = last_item - 1 if node.parents is None: # Ghost continue PyList_Append(result, node.key) # Sorting the parent keys isn't strictly necessary for stable # sorting of a given graph.
But it does help minimize the # differences between graphs # For bzr.dev ancestry: # 4.73ms no sort # 7.73ms RichCompareBool sort parents = _sort_list_nodes(node.parents, 1) for pos from 0 <= pos < len(parents): if PyTuple_CheckExact(parents): parent_node = _get_tuple_node(parents, pos) else: parent_node = _get_list_node(parents, pos) # TODO: GraphCycle detection parent_node.seen = parent_node.seen + 1 if (parent_node.seen == PyList_GET_SIZE(parent_node.children)): # All children have been processed, queue up this # parent last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(parent_node) # SetItem steals a ref PyList_SetItem(pending, last_item, parent_node) else: PyList_Append(pending, parent_node) parent_node.seen = 0 return result def merge_sort(self, tip_key): """Compute the merge sorted graph output.""" cdef _MergeSorter sorter # TODO: consider disabling gc since we are allocating a lot of nodes # that won't be collectable anyway. real world testing has not # shown a specific impact, yet. sorter = _MergeSorter(self, tip_key) return sorter.topo_order() def get_parent_keys(self, key): """Get the parents for a key. Returns a list containing the parent keys. If the key is a ghost, None is returned. A KeyError will be raised if the key is not in the graph. :param key: Key to check (eg revision_id) :return: A list of parents """ return self._nodes[key].parent_keys def get_child_keys(self, key): """Get the children for a key. Returns a list containing the child keys. A KeyError will be raised if the key is not in the graph. :param key: Key to check (eg revision_id) :return: A list of children """ return self._nodes[key].child_keys cdef class _MergeSortNode: """Tracks information about a node during the merge_sort operation.""" # Public api cdef public object key cdef public long merge_depth cdef public object end_of_merge # True/False Is this the end of the current merge # Private api, used while computing the information cdef _KnownGraphNode left_parent cdef _KnownGraphNode left_pending_parent cdef object pending_parents # list of _KnownGraphNode for non-left parents cdef long _revno_first cdef long _revno_second cdef long _revno_last # TODO: turn these into flag/bit fields rather than individual members cdef int is_first_child # Is this the first child? cdef int seen_by_child # A child node has seen this parent cdef int completed # Fully Processed def __init__(self, key): self.key = key self.merge_depth = -1 self.left_parent = None self.left_pending_parent = None self.pending_parents = None self._revno_first = -1 self._revno_second = -1 self._revno_last = -1 self.is_first_child = 0 self.seen_by_child = 0 self.completed = 0 def __repr__(self): return '%s(%s depth:%s rev:%s,%s,%s first:%s seen:%s)' % ( self.__class__.__name__, self.key, self.merge_depth, self._revno_first, self._revno_second, self._revno_last, self.is_first_child, self.seen_by_child) cdef int has_pending_parents(self): # cannot_raise if self.left_pending_parent is not None or self.pending_parents: return 1 return 0 cdef object _revno(self): if self._revno_first == -1: if self._revno_second != -1: raise RuntimeError('Something wrong with: %s' % (self,)) return (self._revno_last,) else: return (self._revno_first, self._revno_second, self._revno_last) property revno: def __get__(self): return self._revno() cdef class _MergeSorter: """This class does the work of computing the merge_sort ordering.
We have some small advantages, in that we get all the extra information that KnownGraph knows, like knowing the child lists, etc. """ # Current performance numbers for merge_sort(bzr_dev_parent_map): # 302ms tsort.merge_sort() # 91ms graph.KnownGraph().merge_sort() # 40ms kg.merge_sort() cdef KnownGraph graph cdef object _depth_first_stack # list cdef Py_ssize_t _last_stack_item # offset to last item on stack # cdef object _ms_nodes # dict of key => _MergeSortNode cdef object _revno_to_branch_count # {revno => num child branches} cdef object _scheduled_nodes # List of nodes ready to be yielded def __init__(self, known_graph, tip_key): cdef _KnownGraphNode node self.graph = known_graph # self._ms_nodes = {} self._revno_to_branch_count = {} self._depth_first_stack = [] self._last_stack_item = -1 self._scheduled_nodes = [] if (tip_key is not None and tip_key != NULL_REVISION and tip_key != (NULL_REVISION,)): node = self.graph._nodes[tip_key] self._push_node(node, 0) cdef _MergeSortNode _get_ms_node(self, _KnownGraphNode node): cdef PyObject *temp_node cdef _MergeSortNode ms_node if node.extra is None: ms_node = _MergeSortNode(node.key) node.extra = ms_node else: ms_node = <_MergeSortNode>node.extra return ms_node cdef _push_node(self, _KnownGraphNode node, long merge_depth): cdef _KnownGraphNode parent_node cdef _MergeSortNode ms_node, ms_parent_node cdef Py_ssize_t pos ms_node = self._get_ms_node(node) ms_node.merge_depth = merge_depth if node.parents is None: raise RuntimeError('ghost nodes should not be pushed' ' onto the stack: %s' % (node,)) if PyTuple_GET_SIZE(node.parents) > 0: parent_node = _get_tuple_node(node.parents, 0) ms_node.left_parent = parent_node if parent_node.parents is None: # left-hand ghost ms_node.left_pending_parent = None ms_node.left_parent = None else: ms_node.left_pending_parent = parent_node if PyTuple_GET_SIZE(node.parents) > 1: ms_node.pending_parents = [] for pos from 1 <= pos < PyTuple_GET_SIZE(node.parents): parent_node = _get_tuple_node(node.parents, pos) if parent_node.parents is None: # ghost continue PyList_Append(ms_node.pending_parents, parent_node) ms_node.is_first_child = 1 if ms_node.left_parent is not None: ms_parent_node = self._get_ms_node(ms_node.left_parent) if ms_parent_node.seen_by_child: ms_node.is_first_child = 0 ms_parent_node.seen_by_child = 1 self._last_stack_item = self._last_stack_item + 1 if self._last_stack_item < PyList_GET_SIZE(self._depth_first_stack): Py_INCREF(node) # SetItem steals a ref PyList_SetItem(self._depth_first_stack, self._last_stack_item, node) else: PyList_Append(self._depth_first_stack, node) cdef _pop_node(self): cdef PyObject *temp cdef _MergeSortNode ms_node, ms_parent_node, ms_prev_node cdef _KnownGraphNode node, parent_node, prev_node node = _get_list_node(self._depth_first_stack, self._last_stack_item) ms_node = <_MergeSortNode>node.extra self._last_stack_item = self._last_stack_item - 1 if ms_node.left_parent is not None: # Assign the revision number from the left-hand parent ms_parent_node = <_MergeSortNode>ms_node.left_parent.extra if ms_node.is_first_child: # First child just increments the final digit ms_node._revno_first = ms_parent_node._revno_first ms_node._revno_second = ms_parent_node._revno_second ms_node._revno_last = ms_parent_node._revno_last + 1 else: # Not the first child, make a new branch # (mainline_revno, branch_count, 1) if ms_parent_node._revno_first == -1: # Mainline ancestor, the increment is on the last digit base_revno = ms_parent_node._revno_last else: base_revno = 
ms_parent_node._revno_first temp = PyDict_GetItem(self._revno_to_branch_count, base_revno) if temp == NULL: branch_count = 1 else: branch_count = (<object>temp) + 1 PyDict_SetItem(self._revno_to_branch_count, base_revno, branch_count) ms_node._revno_first = base_revno ms_node._revno_second = branch_count ms_node._revno_last = 1 else: temp = PyDict_GetItem(self._revno_to_branch_count, 0) if temp == NULL: # The first root node doesn't have a 3-digit revno root_count = 0 ms_node._revno_first = -1 ms_node._revno_second = -1 ms_node._revno_last = 1 else: root_count = (<object>temp) + 1 ms_node._revno_first = 0 ms_node._revno_second = root_count ms_node._revno_last = 1 PyDict_SetItem(self._revno_to_branch_count, 0, root_count) ms_node.completed = 1 if PyList_GET_SIZE(self._scheduled_nodes) == 0: # The first scheduled node is always the end of merge ms_node.end_of_merge = True else: prev_node = _get_list_node(self._scheduled_nodes, PyList_GET_SIZE(self._scheduled_nodes) - 1) ms_prev_node = <_MergeSortNode>prev_node.extra if ms_prev_node.merge_depth < ms_node.merge_depth: # The previously pushed node is to our left, so this is the end # of this right-hand chain ms_node.end_of_merge = True elif (ms_prev_node.merge_depth == ms_node.merge_depth and prev_node not in node.parents): # The next node is not a direct parent of this node ms_node.end_of_merge = True else: ms_node.end_of_merge = False PyList_Append(self._scheduled_nodes, node) cdef _schedule_stack(self): cdef _KnownGraphNode last_node, next_node cdef _MergeSortNode ms_node, ms_last_node, ms_next_node cdef long next_merge_depth ordered = [] while self._last_stack_item >= 0: # Peek at the last item on the stack last_node = _get_list_node(self._depth_first_stack, self._last_stack_item) if last_node.gdfo == -1: # if _find_gdfo skipped a node, that means there is a graph # cycle, error out now raise errors.GraphCycleError(self.graph._nodes) ms_last_node = <_MergeSortNode>last_node.extra if not ms_last_node.has_pending_parents(): # Processed all parents, pop this node self._pop_node() continue while ms_last_node.has_pending_parents(): if ms_last_node.left_pending_parent is not None: # recurse depth first into the primary parent next_node = ms_last_node.left_pending_parent ms_last_node.left_pending_parent = None else: # place any merges in right-to-left order for scheduling # which gives us left-to-right order after we reverse # the scheduled queue. # Note: This has the effect of allocating common-new # revisions to the right-most subtree rather than the # left most, which will display nicely (you get # smaller trees at the top of the combined merge). next_node = ms_last_node.pending_parents.pop() ms_next_node = self._get_ms_node(next_node) if ms_next_node.completed: # this parent was completed by a child on the # call stack. skip it. continue # otherwise transfer it from the source graph into the # top of the current depth first search stack. if next_node is ms_last_node.left_parent: next_merge_depth = ms_last_node.merge_depth else: next_merge_depth = ms_last_node.merge_depth + 1 self._push_node(next_node, next_merge_depth) # and do not continue processing parents until this 'call' # has recursed. break cdef topo_order(self): cdef _MergeSortNode ms_node cdef _KnownGraphNode node cdef Py_ssize_t pos cdef PyObject *temp_key cdef PyObject *temp_node # Note: allocating a _MergeSortNode and deallocating it for all nodes # costs approx 8.52ms (21%) of the total runtime # We might consider moving the attributes into the base # KnownGraph object.
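# Illustrative note (not from the original source): each _MergeSortNode # in the result carries key, merge_depth, end_of_merge and a revno tuple, # e.g. (3,) for mainline revision 3, or (2, 1, 4) for the 4th commit on # the 1st branch that diverged from mainline revno 2.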
self._schedule_stack() # We've set up the basic schedule, now we can continue processing the # output. # Note: This final loop costs us 40.0ms => 28.8ms (11ms, 25%) on # bzr.dev, to convert the internal Object representation into a # Tuple representation... # 2ms is walking the data and computing revno tuples # 7ms is computing the return tuple # 4ms is PyList_Append() ordered = [] # output the result in reverse order, and separate the generated info for pos from PyList_GET_SIZE(self._scheduled_nodes) > pos >= 0: node = _get_list_node(self._scheduled_nodes, pos) ms_node = <_MergeSortNode>node.extra PyList_Append(ordered, ms_node) node.extra = None # Clear out the scheduled nodes now that we're done self._scheduled_nodes = [] return ordered breezy-3.3.11/breezy/_readdir_py.py000066400000000000000000000027461477433716000172570ustar00rootroot00000000000000# Copyright (C) 2006, 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Python implementation of readdir interface.""" import stat _directory = "directory" _chardev = "chardev" _block = "block" _file = "file" _fifo = "fifo" _symlink = "symlink" _socket = "socket" _unknown = "unknown" _formats = { stat.S_IFDIR: "directory", stat.S_IFCHR: "chardev", stat.S_IFBLK: "block", stat.S_IFREG: "file", stat.S_IFIFO: "fifo", stat.S_IFLNK: "symlink", stat.S_IFSOCK: "socket", } def _kind_from_mode(stat_mode, _formats=_formats, _unknown="unknown"): """Generate a file kind from a stat mode. This is used in walkdirs. It's performance is critical: Do not mutate without careful benchmarking. """ try: return _formats[stat_mode & 0o170000] except KeyError: return _unknown breezy-3.3.11/breezy/_readdir_pyx.pyx000066400000000000000000000300551477433716000176310ustar00rootroot00000000000000# Copyright (C) 2006, 2008, 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Wrapper for readdir which returns files ordered by inode.""" import os import sys cdef extern from "python-compat.h": pass cdef extern from 'errno.h': int ENOENT int ENOTDIR int EAGAIN int EINTR char *strerror(int errno) # not necessarily a real variable, but this should be close enough int errno cdef extern from 'unistd.h': int chdir(char *path) int close(int fd) int fchdir(int fd) char *getcwd(char *, int size) cdef extern from 'stdlib.h': void *malloc(int) void free(void *) cdef extern from 'sys/types.h': ctypedef long ssize_t ctypedef unsigned long size_t ctypedef long time_t ctypedef unsigned long ino_t ctypedef unsigned long long off_t ctypedef int mode_t cdef extern from 'sys/stat.h': cdef struct stat: int st_mode off_t st_size int st_dev ino_t st_ino int st_mtime int st_ctime int lstat(char *path, stat *buf) int S_ISDIR(int mode) int S_ISCHR(int mode) int S_ISBLK(int mode) int S_ISREG(int mode) int S_ISFIFO(int mode) int S_ISLNK(int mode) int S_ISSOCK(int mode) cdef extern from 'fcntl.h': int O_RDONLY int open(char *pathname, int flags, mode_t mode) cdef extern from 'Python.h': int PyErr_CheckSignals() except -1 char * PyBytes_AS_STRING(object) ctypedef struct PyObject: pass Py_ssize_t PyBytes_Size(object s) object PyList_GetItem(object lst, Py_ssize_t index) void *PyList_GetItem_object_void "PyList_GET_ITEM" (object lst, int index) int PyList_Append(object lst, object item) except -1 void *PyTuple_GetItem_void_void "PyTuple_GET_ITEM" (void* tpl, int index) int PyTuple_SetItem(void *, Py_ssize_t pos, object item) except -1 int PyTuple_SetItem_obj "PyTuple_SetItem" (void *, Py_ssize_t pos, PyObject * item) except -1 void Py_INCREF(object o) void Py_DECREF(object o) void PyBytes_Concat(PyObject **string, object newpart) cdef extern from 'dirent.h': ctypedef struct dirent: char d_name[256] ino_t d_ino # the opaque C library DIR type. ctypedef struct DIR # should be DIR *, pyrex barfs. DIR * opendir(char * name) int closedir(DIR * dir) dirent *readdir(DIR *dir) cdef object _directory _directory = 'directory' cdef object _chardev _chardev = 'chardev' cdef object _block _block = 'block' cdef object _file _file = 'file' cdef object _fifo _fifo = 'fifo' cdef object _symlink _symlink = 'symlink' cdef object _socket _socket = 'socket' cdef object _unknown _unknown = 'unknown' # add a typedef struct dirent dirent to workaround pyrex cdef extern from 'readdir.h': pass cdef class _Stat: """Represent a 'stat' result.""" cdef stat _st property st_dev: def __get__(self): return self._st.st_dev property st_ino: def __get__(self): return self._st.st_ino property st_mode: def __get__(self): return self._st.st_mode property st_ctime: def __get__(self): return self._st.st_ctime property st_mtime: def __get__(self): return self._st.st_mtime property st_size: def __get__(self): return self._st.st_size def __repr__(self): """Repr is the same as a Stat object. (mode, ino, dev, nlink, uid, gid, size, None(atime), mtime, ctime) """ return repr((self.st_mode, 0, 0, 0, 0, 0, self.st_size, None, self.st_mtime, self.st_ctime)) from . 
import osutils cdef object _safe_utf8 _safe_utf8 = osutils.safe_utf8 cdef class UTF8DirReader: """A dir reader for utf8 file systems.""" def kind_from_mode(self, int mode): """Get the kind of a path from a mode status.""" return self._kind_from_mode(mode) cdef _kind_from_mode(self, int mode): # Files and directories are the most common - check them first. if S_ISREG(mode): return _file if S_ISDIR(mode): return _directory if S_ISCHR(mode): return _chardev if S_ISBLK(mode): return _block if S_ISLNK(mode): return _symlink if S_ISFIFO(mode): return _fifo if S_ISSOCK(mode): return _socket return _unknown def top_prefix_to_starting_dir(self, top, prefix=""): """See DirReader.top_prefix_to_starting_dir.""" return (_safe_utf8(prefix), None, None, None, _safe_utf8(top)) def read_dir(self, prefix, top): """Read a single directory from a utf8 file system. All paths in and out are utf8. This sub-function is called when we know the filesystem is already in utf8 encoding. So we don't need to transcode filenames. See DirReader.read_dir for details. """ #cdef char *_prefix = prefix #cdef char *_top = top # Use C accelerated directory listing. cdef object newval cdef int index cdef int length cdef void * atuple cdef object name cdef PyObject * new_val_obj if PyBytes_Size(prefix): relprefix = prefix + b'/' else: relprefix = b'' top_slash = top + b'/' # read_dir supplies in should-stat order. # for _, name in sorted(_listdir(top)): result = _read_dir(top) length = len(result) # result.sort() for index from 0 <= index < length: atuple = PyList_GetItem_object_void(result, index) name = <object>PyTuple_GetItem_void_void(atuple, 1) # We have a tuple with (inode, name, None, statvalue, None) # Now edit it: # inode -> path_from_top # direct concat - faster than operator +. new_val_obj = <PyObject *>relprefix Py_INCREF(relprefix) PyBytes_Concat(&new_val_obj, name) if NULL == new_val_obj: # PyBytes_Concat will have set up an exception, but how to get # at it? raise Exception("failed to strcat") PyTuple_SetItem_obj(atuple, 0, new_val_obj) # 1st None -> kind newval = self._kind_from_mode( (<_Stat>PyTuple_GetItem_void_void(atuple, 3)).st_mode) Py_INCREF(newval) PyTuple_SetItem(atuple, 2, newval) # 2nd None -> abspath # for all - the caller may need to stat files # etc. # direct concat - faster than operator +. new_val_obj = <PyObject *>top_slash Py_INCREF(top_slash) PyBytes_Concat(&new_val_obj, name) if NULL == new_val_obj: # PyBytes_Concat will have set up an exception, but how to get # at it? raise Exception("failed to strcat") PyTuple_SetItem_obj(atuple, 4, new_val_obj) return result cdef raise_os_error(int errnum, char *msg_prefix, path): if errnum == EINTR: PyErr_CheckSignals() raise OSError(errnum, msg_prefix + strerror(errnum), path) cdef _read_dir(path): """Like os.listdir, this reads the contents of a directory. :param path: the directory to list. :return: a list of single-owner (the list) tuples ready for editing into the result tuples walkdirs needs to yield. They contain (inode, name, None, statvalue, None). """ cdef DIR *the_dir # currently this needs a fixup - the C code says 'dirent' but should say # 'struct dirent' cdef dirent * entry cdef dirent sentinel cdef char *name cdef int stat_result cdef _Stat statvalue global errno cdef int orig_dir_fd # Avoid chdir('') because it causes problems on Sun OS, and avoid this if # staying in .
if path != b"" and path != b'.': # we change into the requested directory before reading, and back at the # end, because that turns out to make the stat calls measurably faster than # passing full paths every time. orig_dir_fd = open(".", O_RDONLY, 0) if orig_dir_fd == -1: raise_os_error(errno, "open: ", ".") if -1 == chdir(path): # Ignore the return value, because we are already raising an # exception close(orig_dir_fd) raise_os_error(errno, "chdir: ", path) else: orig_dir_fd = -1 try: the_dir = opendir(b".") if NULL == the_dir: raise_os_error(errno, "opendir: ", path) try: result = [] entry = &sentinel while entry != NULL: # Unlike most libc functions, readdir needs errno set to 0 # beforehand so that eof can be distinguished from errors. See # while True: errno = 0 entry = readdir(the_dir) if entry == NULL and (errno == EAGAIN or errno == EINTR): if errno == EINTR: PyErr_CheckSignals() # try again continue else: break if entry == NULL: if errno == ENOTDIR or errno == 0: # We see ENOTDIR at the end of a normal directory. # As ENOTDIR for read_dir(file) is triggered on opendir, # we consider ENOTDIR to be 'no error'. continue else: raise_os_error(errno, "readdir: ", path) name = entry.d_name if not (name[0] == c"." and ( (name[1] == 0) or (name[1] == c"." and name[2] == 0)) ): statvalue = _Stat() stat_result = lstat(entry.d_name, &statvalue._st) if stat_result != 0: if errno != ENOENT: raise_os_error(errno, "lstat: ", path + b"/" + entry.d_name) else: # the file seems to have disappeared after being # seen by readdir - perhaps a transient temporary # file. there's no point returning it. continue # We append a 5-tuple that can be modified in-place by the C # api: # inode to sort on (to replace with top_path) # name (to keep) # kind (None, to set) # statvalue (to keep) # abspath (None, to set) PyList_Append(result, (entry.d_ino, entry.d_name, None, statvalue, None)) finally: if -1 == closedir(the_dir): raise_os_error(errno, "closedir: ", path) finally: if -1 != orig_dir_fd: failed = False if -1 == fchdir(orig_dir_fd): # try to close the original directory anyhow failed = True if -1 == close(orig_dir_fd) or failed: raise_os_error(errno, "return to orig_dir: ", "") return result # vim: tw=79 ai expandtab sw=4 sts=4 breezy-3.3.11/breezy/_termcolor.py000066400000000000000000000036611477433716000171400ustar00rootroot00000000000000# Copyright (C) 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import os import sys class FG: """Unix terminal foreground color codes (16-color).""" RED = "\033[31m" GREEN = "\033[32m" YELLOW = "\033[33m" BLUE = "\033[34m" MAGENTA = "\033[35m" CYAN = "\033[36m" WHITE = "\033[37m" # Bold Foreground BOLD_RED = "\033[1;31m" BOLD_GREEN = "\033[1;32m" BOLD_YELLOW = "\033[1;33m" BOLD_BLUE = "\033[1;34m" BOLD_MAGENTA = "\033[1;35m" BOLD_CYAN = "\033[1;36m" BOLD_WHITE = "\033[1;37m" NONE = "\033[0m" class BG: """Unix terminal background color codes (16-color).""" BLACK = "\033[40m" RED = "\033[41m" GREEN = "\033[42m" YELLOW = "\033[43m" BLUE = "\033[44m" MAGENTA = "\033[45m" CYAN = "\033[46m" WHITE = "\033[47m" NONE = "\033[0m" def color_string(s, fg, bg=""): return fg + bg + s + FG.NONE def re_color_string(compiled_pattern, s, fg): return compiled_pattern.sub(fg + r"\1" + FG.NONE, s) def allow_color(): if os.name != "posix": return False if not sys.stdout.isatty(): return False try: import curses curses.setupterm() return curses.tigetnum("colors") > 2 except curses.error: return False breezy-3.3.11/breezy/add.py000066400000000000000000000121441477433716000155170ustar00rootroot00000000000000# Copyright (C) 2005-2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Helper functions for adding files to working trees.""" import os import sys from . import errors, osutils, ui from .i18n import gettext class AddAction: """A class which defines what action to take when adding a file.""" def __init__(self, to_file=None, should_print=None): """Initialize an action which prints added files to an output stream. :param to_file: The stream to write into. This is expected to take Unicode paths. If not supplied, it will default to ``sys.stdout``. :param should_print: If False, printing will be suppressed. """ self._to_file = to_file if to_file is None: self._to_file = sys.stdout self.should_print = False if should_print is not None: self.should_print = should_print def __call__(self, inv, parent_ie, path, kind, _quote=osutils.quotefn): """Add path to inventory. The default action does nothing. :param inv: The inventory we are working with. :param path: The FastPath being added :param kind: The kind of the object being added. """ if self.should_print: self._to_file.write("adding {}\n".format(_quote(path))) return None def skip_file(self, tree, path, kind, stat_value=None): """Test whether the given file should be skipped or not. The default action never skips. Note this is only called during recursive adds :param tree: The tree we are working in :param path: The path being added :param kind: The kind of object being added. :param stat: Stat result for this file, if available already :return bool. 
True if the file should be skipped (not added) """ return False class AddWithSkipLargeAction(AddAction): """A class that can decide to skip a file if it's considered too large.""" _max_size = None def skip_file(self, tree, path, kind, stat_value=None): if kind != "file": return False opt_name = "add.maximum_file_size" if self._max_size is None: config = tree.get_config_stack() self._max_size = config.get(opt_name) if stat_value is None: file_size = os.path.getsize(path) else: file_size = stat_value.st_size if self._max_size > 0 and file_size > self._max_size: ui.ui_factory.show_warning( gettext("skipping {0} (larger than {1} of {2} bytes)").format( path, opt_name, self._max_size ) ) return True return False class AddFromBaseAction(AddAction): """This class will try to extract file ids from another tree.""" def __init__(self, base_tree, base_path, to_file=None, should_print=None): super().__init__(to_file=to_file, should_print=should_print) self.base_tree = base_tree self.base_path = base_path def __call__(self, inv, parent_ie, path, kind): # Place the parent call # Now check to see if we can extract an id for this file file_id, base_path = self._get_base_file_id(path, parent_ie) if file_id is not None: if self.should_print: self._to_file.write( "adding {} w/ file id from {}\n".format(path, base_path) ) else: # we aren't doing anything special, so let the default # reporter happen file_id = super().__call__(inv, parent_ie, path, kind) return file_id def _get_base_file_id(self, path, parent_ie): """Look for a file id in the base branch. First, if the base tree has the parent directory, we look for a file with the same name in that directory. Else, we look for an entry in the base tree with the same path. """ try: parent_path = self.base_tree.id2path(parent_ie.file_id) except errors.NoSuchId: pass else: base_path = osutils.pathjoin(parent_path, osutils.basename(path)) base_id = self.base_tree.path2id(base_path) if base_id is not None: return (base_id, base_path) full_base_path = osutils.pathjoin(self.base_path, path) # This may return None, but it is our last attempt return self.base_tree.path2id(full_base_path), full_base_path breezy-3.3.11/breezy/annotate.py000066400000000000000000000436331477433716000166070ustar00rootroot00000000000000# Copyright (C) 2005-2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """File annotate based on weave storage.""" # TODO: Choice of more or less verbose formats: # # interposed: show more details between blocks of modified lines # TODO: Show which revision caused a line to merge into the parent # TODO: perhaps abbreviate timescales depending on how recent they are # e.g. "3:12 Tue", "13 Oct", "Oct 2005", etc. import sys import time from .lazy_import import lazy_import lazy_import( globals(), """ import patiencediff from breezy import ( tsort, ) """, ) from . 
import config, errors, osutils from .repository import _strip_NULL_ghosts from .revision import CURRENT_REVISION, Revision def annotate_file_tree( tree, path, to_file, verbose=False, full=False, show_ids=False, branch=None ): """Annotate path in a tree. The tree should already be read_locked() when annotate_file_tree is called. :param tree: The tree to look for revision numbers and history from. :param path: The path to annotate :param to_file: The file to output the annotation to. :param verbose: Show all details rather than truncating to ensure reasonable text width. :param full: XXXX Not sure what this does. :param show_ids: Show revision ids in the annotation output. :param branch: Branch to use for revision revno lookups """ if branch is None: branch = tree.branch if to_file is None: to_file = sys.stdout encoding = osutils.get_terminal_encoding() # Handle the show_ids case annotations = list(tree.annotate_iter(path)) if show_ids: return _show_id_annotations(annotations, to_file, full, encoding) if not getattr(tree, "get_revision_id", False): # Create a virtual revision to represent the current tree state. # Should get some more pending commit attributes, like pending tags, # bugfixes etc. current_rev = Revision(CURRENT_REVISION) current_rev.parent_ids = tree.get_parent_ids() try: current_rev.committer = branch.get_config_stack().get("email") except errors.NoWhoami: current_rev.committer = "local user" current_rev.message = "?" current_rev.timestamp = round(time.time(), 3) current_rev.timezone = osutils.local_time_offset() else: current_rev = None annotation = list(_expand_annotations(annotations, branch, current_rev)) _print_annotations(annotation, verbose, to_file, full, encoding) def _print_annotations(annotation, verbose, to_file, full, encoding): """Print annotations to to_file. :param to_file: The file to output the annotation to. :param verbose: Show all details rather than truncating to ensure reasonable text width. :param full: XXXX Not sure what this does. """ if len(annotation) == 0: max_origin_len = max_revno_len = 0 else: max_origin_len = max(len(x[1]) for x in annotation) max_revno_len = max(len(x[0]) for x in annotation) if not verbose: max_revno_len = min(max_revno_len, 12) max_revno_len = max(max_revno_len, 3) # Output the annotations prevanno = "" for revno_str, author, date_str, _line_rev_id, text in annotation: if verbose: anno = f"{revno_str:<{max_revno_len}} {author:<{max_origin_len}} {date_str:>8} " else: if len(revno_str) > max_revno_len: revno_str = revno_str[: max_revno_len - 1] + ">" anno = f"{revno_str:<{max_revno_len}} {author[:7]:<7} " if anno.lstrip() == "" and full: anno = prevanno # GZ 2017-05-21: Writing both unicode annotation and bytes from file # which the given to_file must cope with. to_file.write(anno) to_file.write("| {}\n".format(text.decode(encoding))) prevanno = anno def _show_id_annotations(annotations, to_file, full, encoding): if not annotations: return last_rev_id = None max_origin_len = max(len(origin) for origin, text in annotations) for origin, text in annotations: if full or last_rev_id != origin: this = origin else: this = b"" to_file.write( f"{this.decode('utf-8'):>{max_origin_len}} | {text.decode(encoding)}" ) last_rev_id = origin return def _expand_annotations(annotations, branch, current_rev=None): """Expand a file's annotations into command line UI ready tuples. Each tuple includes detailed information, such as the author name, and date string for the commit, rather than just the revision id. 
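Each yielded tuple has the shape (revno_str, author, date_str, origin, text); an illustrative (invented) example: ('1.1.2', 'jrandom@example.com', '20240101', b'rev-id', b'a line of text').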
:param annotations: The annotations to expand. :param revision_id_to_revno: A map from id to revision numbers. :param branch: A locked branch to query for revision details. """ repository = branch.repository revision_ids = {o for o, t in annotations} if current_rev is not None: # This can probably become a function on MutableTree, get_revno_map # there, or something. last_revision = current_rev.revision_id # XXX: Partially Cloned from branch, uses the old_get_graph, eep. # XXX: The main difficulty is that we need to inject a single new node # (current_rev) into the graph before it gets numbered, etc. # Once KnownGraph gets an 'add_node()' function, we can use # VF.get_known_graph_ancestry(). graph = repository.get_graph() revision_graph = { key: value for key, value in graph.iter_ancestry(current_rev.parent_ids) if value is not None } revision_graph = _strip_NULL_ghosts(revision_graph) revision_graph[last_revision] = current_rev.parent_ids merge_sorted_revisions = tsort.merge_sort( revision_graph, last_revision, None, generate_revno=True ) revision_id_to_revno = { rev_id: revno for seq_num, rev_id, depth, revno, end_of_merge in merge_sorted_revisions } else: # TODO(jelmer): Only look up the revision ids that we need (i.e. those # in revision_ids). Possibly add a HPSS call that can look those up # in bulk over HPSS. revision_id_to_revno = branch.get_revision_id_to_revno_map() last_origin = None revisions = {} if CURRENT_REVISION in revision_ids: revision_id_to_revno[CURRENT_REVISION] = (f"{branch.revno() + 1}?",) revisions[CURRENT_REVISION] = current_rev revisions.update( entry for entry in repository.iter_revisions(revision_ids) if entry[1] is not None ) for origin, text in annotations: text = text.rstrip(b"\r\n") if origin == last_origin: (revno_str, author, date_str) = ("", "", "") else: last_origin = origin if origin not in revisions: (revno_str, author, date_str) = ("?", "?", "?") else: revno_str = ".".join(str(i) for i in revision_id_to_revno[origin]) rev = revisions[origin] tz = rev.timezone or 0 date_str = time.strftime("%Y%m%d", time.gmtime(rev.timestamp + tz)) # a lazy way to get something like the email address # TODO: Get real email address author = rev.get_apparent_authors()[0] _, email = config.parse_username(author) if email: author = email yield (revno_str, author, date_str, origin, text) def reannotate( parents_lines, new_lines, new_revision_id, _left_matching_blocks=None, heads_provider=None, ): """Create a new annotated version from new lines and parent annotations. :param parents_lines: List of annotated lines for all parents :param new_lines: The un-annotated new lines :param new_revision_id: The revision-id to associate with new lines (will often be CURRENT_REVISION) :param left_matching_blocks: a hint about which areas are common between the text and its left-hand-parent. The format is the SequenceMatcher.get_matching_blocks format (start_left, start_right, length_of_match). :param heads_provider: An object which provides a .heads() call to resolve if any revision ids are children of others. 
If None, then any ancestry disputes will be resolved with new_revision_id """ if len(parents_lines) == 0: lines = [(new_revision_id, line) for line in new_lines] elif len(parents_lines) == 1: lines = _reannotate( parents_lines[0], new_lines, new_revision_id, _left_matching_blocks ) elif len(parents_lines) == 2: left = _reannotate( parents_lines[0], new_lines, new_revision_id, _left_matching_blocks ) lines = _reannotate_annotated( parents_lines[1], new_lines, new_revision_id, left, heads_provider ) else: reannotations = [ _reannotate( parents_lines[0], new_lines, new_revision_id, _left_matching_blocks ) ] reannotations.extend( _reannotate(p, new_lines, new_revision_id) for p in parents_lines[1:] ) lines = [] for annos in zip(*reannotations): origins = {a for a, l in annos} if len(origins) == 1: # All the parents agree, so just return the first one lines.append(annos[0]) else: line = annos[0][1] if len(origins) == 2 and new_revision_id in origins: origins.remove(new_revision_id) if len(origins) == 1: lines.append((origins.pop(), line)) else: lines.append((new_revision_id, line)) return lines def _reannotate(parent_lines, new_lines, new_revision_id, matching_blocks=None): new_cur = 0 if matching_blocks is None: plain_parent_lines = [l for r, l in parent_lines] matcher = patiencediff.PatienceSequenceMatcher( None, plain_parent_lines, new_lines ) matching_blocks = matcher.get_matching_blocks() lines = [] for i, j, n in matching_blocks: for line in new_lines[new_cur:j]: lines.append((new_revision_id, line)) lines.extend(parent_lines[i : i + n]) new_cur = j + n return lines def _get_matching_blocks(old, new): matcher = patiencediff.PatienceSequenceMatcher(None, old, new) return matcher.get_matching_blocks() _break_annotation_tie = None def _old_break_annotation_tie(annotated_lines): """Choose an attribution between several possible ones. :param annotated_lines: A list of tuples ((file_id, rev_id), line) where the lines are identical but the revids different while no parent relation exists between them :return: The "winning" line. This must be one with a revid that guarantees that further criss-cross merges will converge. Failing to do so has performance implications. """ # sort lexicographically so that we always get a stable result. # TODO: while 'sort' is the easiest (and nearly the only possible solution) # with the current implementation, choosing the oldest revision is known to # provide better results (as in matching user expectations). The most # common use case being manual cherry-pick from an already existing # revision. return sorted(annotated_lines)[0] def _find_matching_unannotated_lines( output_lines, plain_child_lines, child_lines, start_child, end_child, right_lines, start_right, end_right, heads_provider, revision_id, ): """Find lines in plain_right_lines that match the existing lines.
:param output_lines: Append final annotated lines to this list :param plain_child_lines: The unannotated new lines for the child text :param child_lines: Lines for the child text which have been annotated for the left parent :param start_child: Position in plain_child_lines and child_lines to start the match searching :param end_child: Last position in plain_child_lines and child_lines to search for a match :param right_lines: The annotated lines for the whole text for the right parent :param start_right: Position in right_lines to start the match :param end_right: Last position in right_lines to search for a match :param heads_provider: When parents disagree on the lineage of a line, we need to check if one side supersedes the other :param revision_id: The label to give if a line should be labeled 'tip' """ output_extend = output_lines.extend output_append = output_lines.append # We need to see if any of the unannotated lines match plain_right_subset = [l for a, l in right_lines[start_right:end_right]] plain_child_subset = plain_child_lines[start_child:end_child] match_blocks = _get_matching_blocks(plain_right_subset, plain_child_subset) last_child_idx = 0 for right_idx, child_idx, match_len in match_blocks: # All the lines that don't match are just passed along if child_idx > last_child_idx: output_extend( child_lines[start_child + last_child_idx : start_child + child_idx] ) for offset in range(match_len): left = child_lines[start_child + child_idx + offset] right = right_lines[start_right + right_idx + offset] if left[0] == right[0]: # The annotations match, just return the left one output_append(left) elif left[0] == revision_id: # The left parent marked this as unmatched, so let the # right parent claim it output_append(right) else: # Left and Right both claim this line if heads_provider is None: output_append((revision_id, left[1])) else: heads = heads_provider.heads((left[0], right[0])) if len(heads) == 1: output_append((next(iter(heads)), left[1])) else: # Both claim different origins, get a stable result. # If the result is not stable, there is a risk a # performance degradation as criss-cross merges will # flip-flop the attribution. if _break_annotation_tie is None: output_append(_old_break_annotation_tie([left, right])) else: output_append(_break_annotation_tie([left, right])) last_child_idx = child_idx + match_len def _reannotate_annotated( right_parent_lines, new_lines, new_revision_id, annotated_lines, heads_provider ): """Update the annotations for a node based on another parent. :param right_parent_lines: A list of annotated lines for the right-hand parent. :param new_lines: The unannotated new lines. :param new_revision_id: The revision_id to attribute to lines which are not present in either parent. :param annotated_lines: A list of annotated lines. This should be the annotation of new_lines based on parents seen so far. :param heads_provider: When parents disagree on the lineage of a line, we need to check if one side supersedes the other. """ if len(new_lines) != len(annotated_lines): raise AssertionError("mismatched new_lines and annotated_lines") # First compare the newly annotated lines with the right annotated lines. # Lines which were not changed in left or right should match. This tends to # be the bulk of the lines, and they will need no further processing. 
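# Gloss (illustrative, standard SequenceMatcher semantics): each # (right_idx, left_idx, match_len) block used below means # right_parent_lines[right_idx:right_idx + match_len] == # annotated_lines[left_idx:left_idx + match_len], and the block list # ends with a zero-length sentinel.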
lines = [] lines_extend = lines.extend # The line just after the last match from the right side last_right_idx = 0 last_left_idx = 0 matching_left_and_right = _get_matching_blocks(right_parent_lines, annotated_lines) for right_idx, left_idx, match_len in matching_left_and_right: # annotated lines from last_left_idx to left_idx did not match the # lines from last_right_idx to right_idx, the raw lines should be # compared to determine what annotations need to be updated if last_right_idx == right_idx or last_left_idx == left_idx: # One of the sides is empty, so this is a pure insertion lines_extend(annotated_lines[last_left_idx:left_idx]) else: # We need to see if any of the unannotated lines match _find_matching_unannotated_lines( lines, new_lines, annotated_lines, last_left_idx, left_idx, right_parent_lines, last_right_idx, right_idx, heads_provider, new_revision_id, ) last_right_idx = right_idx + match_len last_left_idx = left_idx + match_len # If left and right agree on a range, just push that into the output lines_extend(annotated_lines[left_idx : left_idx + match_len]) return lines try: from breezy._annotator_pyx import Annotator except ImportError as e: osutils.failed_to_load_extension(e) from breezy._annotator_py import Annotator # noqa: F401 breezy-3.3.11/breezy/archive/000077500000000000000000000000001477433716000160345ustar00rootroot00000000000000breezy-3.3.11/breezy/archive/__init__.py000066400000000000000000000066211477433716000201520ustar00rootroot00000000000000# Copyright (C) 2018 Breezy Developers # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Export trees to tarballs, zipfiles, etc.""" from collections.abc import Iterator from typing import cast from .. import errors, registry class ArchiveFormatInfo: def __init__(self, extensions): self.extensions = extensions class ArchiveFormatRegistry(registry.Registry): """Registry of archive formats.""" def __init__(self): self._extension_map = {} super().__init__() @property def extensions(self): return self._extension_map.keys() def register(self, key, factory, extensions, help=None): """Register an archive format.""" registry.Registry.register( self, key, factory, help, ArchiveFormatInfo(extensions) ) self._register_extensions(key, extensions) def register_lazy(self, key, module_name, member_name, extensions, help=None): registry.Registry.register_lazy( self, key, module_name, member_name, help, ArchiveFormatInfo(extensions) ) self._register_extensions(key, extensions) def _register_extensions(self, name, extensions): for ext in extensions: self._extension_map[ext] = name def get_format_from_filename(self, filename): """Determine the archive format from an extension. 
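For example (illustrative): 'src.tar.gz' maps to 'tgz', while a filename with an unrecognised extension returns None.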
:param filename: Filename to guess from :return: A format name, or None """ for ext, format in self._extension_map.items(): if filename.endswith(ext): return format else: return None def create_archive( format, tree, name, root=None, subdir=None, force_mtime=None, recurse_nested=False ) -> Iterator[bytes]: try: archive_fn = format_registry.get(format) except KeyError as exc: raise errors.NoSuchExportFormat(format) from exc return cast( Iterator[bytes], archive_fn( tree, name, root=root, subdir=subdir, force_mtime=force_mtime, recurse_nested=recurse_nested, ), ) format_registry = ArchiveFormatRegistry() format_registry.register_lazy( "tar", "breezy.archive.tar", "plain_tar_generator", [".tar"], ) format_registry.register_lazy( "tgz", "breezy.archive.tar", "tgz_generator", [".tar.gz", ".tgz"] ) format_registry.register_lazy( "tbz2", "breezy.archive.tar", "tbz_generator", [".tar.bz2", ".tbz2"] ) format_registry.register_lazy( "tlzma", "breezy.archive.tar", "tar_lzma_generator", [".tar.lzma"] ) format_registry.register_lazy( "txz", "breezy.archive.tar", "tar_xz_generator", [".tar.xz"] ) format_registry.register_lazy( "zip", "breezy.archive.zip", "zip_archive_generator", [".zip"] ) breezy-3.3.11/breezy/archive/tar.py000066400000000000000000000160311477433716000171750ustar00rootroot00000000000000# Copyright (C) 2005, 2006, 2008-2011 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Export a tree to a tarball.""" import os import tarfile from contextlib import closing from io import BytesIO from .. import errors, osutils from ..export import _export_iter_entries def prepare_tarball_item(tree, root, final_path, tree_path, entry, force_mtime=None): """Prepare a tarball item for exporting. :param tree: Tree to export :param final_path: Final path to place item :param tree_path: Path for the entry in the tree :param entry: Entry to export :param force_mtime: Option mtime to force, instead of using tree timestamps. Returns a (tarinfo, fileobj) tuple """ filename = osutils.pathjoin(root, final_path) item = tarfile.TarInfo(filename) if force_mtime is not None: item.mtime = force_mtime else: item.mtime = tree.get_file_mtime(tree_path) if entry.kind == "file": item.type = tarfile.REGTYPE if tree.is_executable(tree_path): item.mode = 0o755 else: item.mode = 0o644 # This brings the whole file into memory, but that's almost needed for # the tarfile contract, which wants the size of the file up front. We # want to make sure it doesn't change, and we need to read it in one # go for content filtering. 
content = tree.get_file_text(tree_path) item.size = len(content) fileobj = BytesIO(content) elif entry.kind in ("directory", "tree-reference"): item.type = tarfile.DIRTYPE item.name += "/" item.size = 0 item.mode = 0o755 fileobj = None elif entry.kind == "symlink": item.type = tarfile.SYMTYPE item.size = 0 item.mode = 0o755 item.linkname = tree.get_symlink_target(tree_path) fileobj = None else: raise errors.BzrError( "don't know how to export {{{}}} of kind {!r}".format( final_path, entry.kind ) ) return (item, fileobj) def tarball_generator( tree, root, subdir=None, force_mtime=None, format="", recurse_nested=False ): """Export tree contents to a tarball. Args: tree: Tree to export subdir: Sub directory to export force_mtime: Option mtime to force, instead of using tree timestamps. Returns: A generator that will produce file content chunks. """ buf = BytesIO() with ( closing(tarfile.open(None, "w:{}".format(format), buf)) as ball, tree.lock_read(), ): for final_path, tree_path, entry in _export_iter_entries( tree, subdir, recurse_nested=recurse_nested ): (item, fileobj) = prepare_tarball_item( tree, root, final_path, tree_path, entry, force_mtime ) ball.addfile(item, fileobj) # Yield the data that was written so far, rinse, repeat. yield buf.getvalue() buf.truncate(0) buf.seek(0) yield buf.getvalue() def tgz_generator(tree, dest, root, subdir, force_mtime=None, recurse_nested=False): """Export this tree to a new tar file. `dest` will be created holding the contents of this tree; if it already exists, it will be clobbered, like with "tar -c". """ with tree.lock_read(): import gzip if force_mtime is not None: root_mtime = force_mtime elif getattr(tree, "repository", None) and getattr( tree, "get_revision_id", None ): # If this is a revision tree, use the revisions' timestamp rev = tree.repository.get_revision(tree.get_revision_id()) root_mtime = rev.timestamp elif tree.is_versioned(""): root_mtime = tree.get_file_mtime("") else: root_mtime = None basename = None # gzip file is used with an explicit fileobj so that # the basename can be stored in the gzip file rather than # dest. (bug 102234) basename = os.path.basename(dest) buf = BytesIO() zipstream = gzip.GzipFile(basename, "w", fileobj=buf, mtime=root_mtime) for chunk in tarball_generator( tree, root, subdir, force_mtime, recurse_nested=recurse_nested ): zipstream.write(chunk) # Yield the data that was written so far, rinse, repeat. yield buf.getvalue() buf.truncate(0) buf.seek(0) # Closing zipstream may trigger writes to stream zipstream.close() yield buf.getvalue() def tbz_generator(tree, dest, root, subdir, force_mtime=None, recurse_nested=False): """Export this tree to a new tar file. `dest` will be created holding the contents of this tree; if it already exists, it will be clobbered, like with "tar -c". """ return tarball_generator( tree, root, subdir, force_mtime, format="bz2", recurse_nested=recurse_nested ) def plain_tar_generator( tree, dest, root, subdir, force_mtime=None, recurse_nested=False ): """Export this tree to a new tar file. `dest` will be created holding the contents of this tree; if it already exists, it will be clobbered, like with "tar -c". 
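A minimal usage sketch (illustrative; the output path and root name are invented, and the generator takes its own read lock): with open('export.tar', 'wb') as f: for chunk in plain_tar_generator(tree, 'export.tar', 'my-root', None): f.write(chunk)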
""" return tarball_generator( tree, root, subdir, force_mtime, format="", recurse_nested=recurse_nested ) def tar_xz_generator(tree, dest, root, subdir, force_mtime=None, recurse_nested=False): return tar_lzma_generator( tree, dest, root, subdir, force_mtime, "xz", recurse_nested=recurse_nested ) def tar_lzma_generator( tree, dest, root, subdir, force_mtime=None, compression_format="alone", recurse_nested=False, ): """Export this tree to a new .tar.lzma file. `dest` will be created holding the contents of this tree; if it already exists, it will be clobbered, like with "tar -c". """ try: import lzma except ModuleNotFoundError as exc: raise errors.DependencyNotPresent("lzma", e) from exc compressor = lzma.LZMACompressor( format={ "xz": lzma.FORMAT_XZ, "raw": lzma.FORMAT_RAW, "alone": lzma.FORMAT_ALONE, }[compression_format] ) for chunk in tarball_generator( tree, root, subdir, force_mtime=force_mtime, recurse_nested=recurse_nested ): yield compressor.compress(chunk) yield compressor.flush() breezy-3.3.11/breezy/archive/zip.py000066400000000000000000000072551477433716000172210ustar00rootroot00000000000000# Copyright (C) 2005, 2006, 2008, 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Export a Tree to a zip file.""" import stat import tempfile import time import zipfile from contextlib import closing from .. import osutils from ..export import _export_iter_entries from ..trace import mutter # Windows expects this bit to be set in the 'external_attr' section, # or it won't consider the entry a directory. ZIP_DIRECTORY_BIT = 1 << 4 FILE_PERMISSIONS = 0o644 << 16 DIR_PERMISSIONS = 0o755 << 16 _FILE_ATTR = stat.S_IFREG | FILE_PERMISSIONS _DIR_ATTR = stat.S_IFDIR | ZIP_DIRECTORY_BIT | DIR_PERMISSIONS def zip_archive_generator( tree, dest, root, subdir=None, force_mtime=None, recurse_nested=False ): """Export this tree to a new zip file. `dest` will be created holding the contents of this tree; if it already exists, it will be overwritten". """ compression = zipfile.ZIP_DEFLATED with tempfile.SpooledTemporaryFile() as buf: with closing(zipfile.ZipFile(buf, "w", compression)) as zipf, tree.lock_read(): for dp, tp, ie in _export_iter_entries( tree, subdir, recurse_nested=recurse_nested ): mutter(" export {%s} kind %s to %s", tp, ie.kind, dest) # zipfile.ZipFile switches all paths to forward # slashes anyway, so just stick with that. 
breezy-3.3.11/breezy/atomicfile.py000066400000000000000000000074111477433716000171040ustar00rootroot00000000000000# Copyright (C) 2005, 2006, 2008, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import os
import stat

from breezy import errors, osutils

# not fork-safe - but we don't fork.
_pid = os.getpid()
_hostname = None


class AtomicFileAlreadyClosed(errors.PathError):
    _fmt = '"%(function)s" called on an AtomicFile after it was closed: "%(path)s"'

    def __init__(self, path, function):
        errors.PathError.__init__(self, path=path, extra=None)
        self.function = function


class AtomicFile:
    """A file that does an atomic-rename to move into place.

    This also causes hardlinks to break when it's written out.

    Open this as for a regular file, then use commit() to move into
    place or abort() to cancel.
    """

    __slots__ = ["_fd", "realfilename", "tmpfilename"]

    def __init__(self, filename, mode="wb", new_mode=None):
        global _hostname

        self._fd = None

        if _hostname is None:
            _hostname = osutils.get_host_name()

        self.tmpfilename = f"{filename}.{_pid}.{_hostname}.{osutils.rand_chars(10)}.tmp"

        self.realfilename = filename

        flags = os.O_EXCL | os.O_CREAT | os.O_WRONLY | osutils.O_NOINHERIT
        if mode == "wb":
            flags |= osutils.O_BINARY
        elif mode != "wt":
            raise ValueError("invalid AtomicFile mode {!r}".format(mode))

        if new_mode is not None:
            local_mode = new_mode
        else:
            local_mode = 0o666

        # Use a low level fd operation to avoid chmodding later.
        # This may not succeed, but it should help most of the time
        self._fd = os.open(self.tmpfilename, flags, local_mode)

        if new_mode is not None:
            # Because of umask issues, we may need to chmod anyway
            # the common case is that we won't, though.
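            # Added commentary (illustrative, not from the original source):
            # os.open() applies the process umask to the requested mode, so
            # with e.g. umask 0o022 a request for 0o664 creates the file as
            # 0o644.  The fstat/chmod pair below detects that case and
            # restores the exact mode the caller asked for.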
st = os.fstat(self._fd) if stat.S_IMODE(st.st_mode) != new_mode: osutils.chmod_if_possible(self.tmpfilename, new_mode) def __repr__(self): return "{}({!r})".format(self.__class__.__name__, self.realfilename) def write(self, data): """Write some data to the file. Like file.write().""" os.write(self._fd, data) def _close_tmpfile(self, func_name): """Close the local temp file in preparation for commit or abort.""" if self._fd is None: raise AtomicFileAlreadyClosed(path=self.realfilename, function=func_name) fd = self._fd self._fd = None os.close(fd) def commit(self): """Close the file and move to final name.""" self._close_tmpfile("commit") osutils.rename(self.tmpfilename, self.realfilename) def abort(self): """Discard temporary file without committing changes.""" self._close_tmpfile("abort") os.remove(self.tmpfilename) def close(self): """Discard the file unless already committed.""" if self._fd is not None: self.abort() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type: self.abort() return False self.commit() breezy-3.3.11/breezy/bedding.py000066400000000000000000000225541477433716000163710ustar00rootroot00000000000000# Copyright (C) 2005-2014, 2016 Canonical Ltd # Copyright (C) 2019 Breezy developers # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Functions for deriving user configuration from system environment.""" import os import sys from .lazy_import import lazy_import lazy_import( globals(), """ from breezy import ( osutils, trace, win32utils, ) """, ) from . import errors def ensure_config_dir_exists(path=None): """Make sure a configuration directory exists. This makes sure that the directory exists. On windows, since configuration directories are 2 levels deep, it makes sure both the directory and the parent directory exists. """ if path is None: path = config_dir() if not os.path.isdir(path): parent_dir = os.path.dirname(path) if not os.path.isdir(parent_dir): trace.mutter("creating config parent directory: %r", parent_dir) os.mkdir(parent_dir) osutils.copy_ownership_from_path(parent_dir) trace.mutter("creating config directory: %r", path) os.mkdir(path) osutils.copy_ownership_from_path(path) def bazaar_config_dir(): """Return per-user configuration directory as unicode string. By default this is %APPDATA%/bazaar/2.0 on Windows, ~/.bazaar on Mac OS X and Linux. On Mac OS X and Linux, if there is a $XDG_CONFIG_HOME/bazaar directory, that will be used instead TODO: Global option --config-dir to override this. 
""" base = os.environ.get("BZR_HOME") if sys.platform == "win32": if base is None: base = win32utils.get_appdata_location() if base is None: base = win32utils.get_home_location() return osutils.pathjoin(base, "bazaar", "2.0") if base is None: xdg_dir = os.environ.get("XDG_CONFIG_HOME") if xdg_dir is None: xdg_dir = osutils.pathjoin(osutils._get_home_dir(), ".config") xdg_dir = osutils.pathjoin(xdg_dir, "bazaar") if osutils.isdir(xdg_dir): trace.mutter("Using configuration in XDG directory {}.".format(xdg_dir)) return xdg_dir base = osutils._get_home_dir() return osutils.pathjoin(base, ".bazaar") def _config_dir(): """Return per-user configuration directory as unicode string. By default this is %APPDATA%/breezy on Windows, $XDG_CONFIG_HOME/breezy on Mac OS X and Linux. If the breezy config directory doesn't exist but the bazaar one (see bazaar_config_dir()) does, use that instead. """ # TODO: Global option --config-dir to override this. base = os.environ.get("BRZ_HOME") if sys.platform == "win32": if base is None: base = win32utils.get_appdata_location() if base is None: # Assume that AppData location is ALWAYS DEFINED, # and don't look for %HOME%, as we aren't sure about # where the files should be stored in %HOME%: # on other platforms the directory is ~/.config/, # but that would be incompatible with older Bazaar versions. raise RuntimeError("Unable to determine AppData location") if base is None: base = os.environ.get("XDG_CONFIG_HOME") if base is None: base = osutils.pathjoin(osutils._get_home_dir(), ".config") breezy_dir = osutils.pathjoin(base, "breezy") if osutils.isdir(breezy_dir): return (breezy_dir, "breezy") # If the breezy directory doesn't exist, but the bazaar one does, use that: bazaar_dir = bazaar_config_dir() if osutils.isdir(bazaar_dir): trace.mutter("Using Bazaar configuration directory (%s)", bazaar_dir) return (bazaar_dir, "bazaar") return (breezy_dir, "breezy") def config_dir(): """Return per-user configuration directory as unicode string. By default this is %APPDATA%/breezy on Windows, $XDG_CONFIG_HOME/breezy on Mac OS X and Linux. If the breezy config directory doesn't exist but the bazaar one (see bazaar_config_dir()) does, use that instead. """ return _config_dir()[0] def config_path(): """Return per-user configuration ini file filename.""" path, kind = _config_dir() if kind == "bazaar": return osutils.pathjoin(path, "bazaar.conf") else: return osutils.pathjoin(path, "breezy.conf") def locations_config_path(): """Return per-user configuration ini file filename.""" return osutils.pathjoin(config_dir(), "locations.conf") def authentication_config_path(): """Return per-user authentication ini file filename.""" return osutils.pathjoin(config_dir(), "authentication.conf") def user_ignore_config_path(): """Return per-user authentication ini file filename.""" return osutils.pathjoin(config_dir(), "ignore") def crash_dir(): """Return the directory name to store crash files. This doesn't implicitly create it. On Windows it's in the config directory; elsewhere it's /var/crash which may be monitored by apport. It can be overridden by $APPORT_CRASH_DIR. 
""" if sys.platform == "win32": return osutils.pathjoin(config_dir(), "Crash") else: # XXX: hardcoded in apport_python_hook.py; therefore here too -- mbp # 2010-01-31 return os.environ.get("APPORT_CRASH_DIR", "/var/crash") def cache_dir(): """Return the cache directory to use.""" base = os.environ.get("BRZ_HOME") if sys.platform in "win32": if base is None: base = win32utils.get_local_appdata_location() if base is None: base = win32utils.get_home_location() else: base = os.environ.get("XDG_CACHE_HOME") if base is None: base = osutils.pathjoin(osutils._get_home_dir(), ".cache") cache_dir = osutils.pathjoin(base, "breezy") # GZ 2019-06-15: Move responsibility for ensuring dir exists elsewhere? if not os.path.exists(cache_dir): os.makedirs(cache_dir) return cache_dir def _get_default_mail_domain(mailname_file="/etc/mailname"): """If possible, return the assumed default email domain. :returns: string mail domain, or None. """ if sys.platform == "win32": # No implementation yet; patches welcome return None try: f = open(mailname_file) except OSError: return None try: domain = f.readline().strip() return domain finally: f.close() def default_email(): v = os.environ.get("BRZ_EMAIL") if v: return v v = os.environ.get("EMAIL") if v: return v name, email = _auto_user_id() if name and email: return "{} <{}>".format(name, email) elif email: return email raise errors.NoWhoami() def _auto_user_id(): """Calculate automatic user identification. :returns: (realname, email), either of which may be None if they can't be determined. Only used when none is set in the environment or the id file. This only returns an email address if we can be fairly sure the address is reasonable, ie if /etc/mailname is set on unix. This doesn't use the FQDN as the default domain because that may be slow, and it doesn't use the hostname alone because that's not normally a reasonable address. """ if sys.platform == "win32": # No implementation to reliably determine Windows default mail # address; please add one. return None, None default_mail_domain = _get_default_mail_domain() if not default_mail_domain: return None, None import pwd uid = os.getuid() try: w = pwd.getpwuid(uid) except KeyError: trace.mutter("no passwd entry for uid %d?", uid) return None, None # we try utf-8 first, because on many variants (like Linux), # /etc/passwd "should" be in utf-8, and because it's unlikely to give # false positives. (many users will have their user encoding set to # latin-1, which cannot raise UnicodeError.) gecos = w.pw_gecos if isinstance(gecos, bytes): try: gecos = gecos.decode("utf-8") encoding = "utf-8" except UnicodeError: try: encoding = osutils.get_user_encoding() gecos = gecos.decode(encoding) except UnicodeError: trace.mutter("cannot decode passwd entry {}".format(w)) return None, None username = w.pw_name if isinstance(username, bytes): try: username = username.decode(encoding) except UnicodeError: trace.mutter("cannot decode passwd entry {}".format(w)) return None, None comma = gecos.find(",") if comma == -1: realname = gecos else: realname = gecos[:comma] return realname, (username + "@" + default_mail_domain) breezy-3.3.11/breezy/bisect.py000066400000000000000000000402161477433716000162410ustar00rootroot00000000000000# Copyright (C) 2006-2011 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """bisect command implementations.""" import sys from . import revision as _mod_revision from .commands import Command from .controldir import ControlDir from .errors import CommandError from .option import Option from .trace import note BISECT_INFO_PATH = "bisect" BISECT_REV_PATH = "bisect_revid" class BisectCurrent: """Bisect class for managing the current revision.""" def __init__(self, controldir, filename=BISECT_REV_PATH): self._filename = filename self._controldir = controldir self._branch = self._controldir.open_branch() if self._controldir.control_transport.has(filename): self._revid = self._controldir.control_transport.get_bytes(filename).strip() else: self._revid = self._branch.last_revision() def _save(self): """Save the current revision.""" self._controldir.control_transport.put_bytes( self._filename, self._revid + b"\n" ) def get_current_revid(self): """Return the current revision id.""" return self._revid def get_current_revno(self): """Return the current revision number as a tuple.""" return self._branch.revision_id_to_dotted_revno(self._revid) def get_parent_revids(self): """Return the IDs of the current revision's predecessors.""" repo = self._branch.repository with repo.lock_read(): retval = repo.get_parent_map([self._revid]).get(self._revid, None) return retval def is_merge_point(self): """Is the current revision a merge point?""" return len(self.get_parent_revids()) > 1 def show_rev_log(self, outf): """Write the current revision's log entry to a file.""" rev = self._branch.repository.get_revision(self._revid) revno = ".".join([str(x) for x in self.get_current_revno()]) outf.write( "On revision {} ({}):\n{}\n".format(revno, rev.revision_id, rev.message) ) def switch(self, revid): """Switch the current revision to the given revid.""" working = self._controldir.open_workingtree() if isinstance(revid, int): revid = self._branch.get_rev_id(revid) elif isinstance(revid, list): revid = revid[0].in_history(working.branch).rev_id working.revert(None, working.branch.repository.revision_tree(revid), False) self._revid = revid self._save() def reset(self): """Revert bisection, setting the working tree to normal.""" working = self._controldir.open_workingtree() last_rev = working.branch.last_revision() rev_tree = working.branch.repository.revision_tree(last_rev) working.revert(None, rev_tree, False) if self._controldir.control_transport.has(BISECT_REV_PATH): self._controldir.control_transport.delete(BISECT_REV_PATH) class BisectLog: """Bisect log file handler.""" def __init__(self, controldir, filename=BISECT_INFO_PATH): self._items = [] self._current = BisectCurrent(controldir) self._controldir = controldir self._branch = None self._high_revid = None self._low_revid = None self._middle_revid = None self._filename = filename self.load() def _open_for_read(self): """Open log file for reading.""" if self._filename: return self._controldir.control_transport.get(self._filename) else: return sys.stdin def _load_tree(self): """Load bzr information.""" if not self._branch: self._branch = self._controldir.open_branch() def 
_find_range_and_middle(self, branch_last_rev=None):
        """Find the current revision range, and the midpoint."""
        self._load_tree()
        self._middle_revid = None

        if not branch_last_rev:
            last_revid = self._branch.last_revision()
        else:
            last_revid = branch_last_rev

        repo = self._branch.repository
        with repo.lock_read():
            graph = repo.get_graph()
            rev_sequence = graph.iter_lefthand_ancestry(
                last_revid, (_mod_revision.NULL_REVISION,)
            )
            high_revid = None
            low_revid = None
            between_revs = []
            for revision in rev_sequence:
                between_revs.insert(0, revision)
                matches = [
                    x[1]
                    for x in self._items
                    if x[0] == revision and x[1] in ("yes", "no")
                ]
                if not matches:
                    continue
                if len(matches) > 1:
                    raise RuntimeError("revision {} duplicated".format(revision))
                if matches[0] == "yes":
                    high_revid = revision
                    between_revs = []
                elif matches[0] == "no":
                    low_revid = revision
                    del between_revs[0]
                    break

            if not high_revid:
                high_revid = last_revid
            if not low_revid:
                low_revid = self._branch.get_rev_id(1)

        # The spread must include the high revision, to bias
        # odd numbers of intervening revisions towards the high
        # side.

        spread = len(between_revs) + 1
        if spread < 2:
            middle_index = 0
        else:
            middle_index = (spread // 2) - 1

        if len(between_revs) > 0:
            self._middle_revid = between_revs[middle_index]
        else:
            self._middle_revid = high_revid

        self._high_revid = high_revid
        self._low_revid = low_revid

    def _switch_wc_to_revno(self, revno, outf):
        """Move the working tree to the given revno."""
        self._current.switch(revno)
        self._current.show_rev_log(outf=outf)

    def _set_status(self, revid, status):
        """Set the bisect status for the given revid."""
        if not self.is_done():
            if status != "done" and revid in [
                x[0] for x in self._items if x[1] in ["yes", "no"]
            ]:
                raise RuntimeError("attempting to add revid {} twice".format(revid))
            self._items.append((revid, status))

    def change_file_name(self, filename):
        """Switch log files."""
        self._filename = filename

    def load(self):
        """Load the bisection log."""
        self._items = []
        if self._controldir.control_transport.has(self._filename):
            revlog = self._open_for_read()
            for line in revlog:
                (revid, status) = line.split()
                self._items.append((revid, status.decode("ascii")))

    def save(self):
        """Save the bisection log."""
        contents = b"".join(
            (b"%s %s\n" % (revid, status.encode("ascii")))
            for (revid, status) in self._items
        )
        if self._filename:
            self._controldir.control_transport.put_bytes(self._filename, contents)
        else:
            sys.stdout.buffer.write(contents)

    def is_done(self):
        """Report whether we've found the right revision."""
        return len(self._items) > 0 and self._items[-1][1] == "done"

    def set_status_from_revspec(self, revspec, status):
        """Set the bisection status for the revision in revspec."""
        self._load_tree()
        revid = revspec[0].in_history(self._branch).rev_id
        self._set_status(revid, status)

    def set_current(self, status):
        """Set the current revision to the given bisection status."""
        self._set_status(self._current.get_current_revid(), status)

    def is_merge_point(self, revid):
        return len(self.get_parent_revids(revid)) > 1

    def get_parent_revids(self, revid):
        repo = self._branch.repository
        with repo.lock_read():
            retval = repo.get_parent_map([revid]).get(revid, None)
        return retval

    def bisect(self, outf):
        """Using the current revision's status, do a bisection."""
        self._find_range_and_middle()
        # If we've found the "final" revision, check for a
        # merge point.
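        # Added commentary (not from the original source): the midpoint can
        # collapse onto an endpoint while still being a merge commit, in
        # which case the offending change may live on a merged branch the
        # mainline walk never visited.  The loop below therefore descends
        # into each merge parent (skipping the known-good low end) and
        # re-runs the range search there before settling on a final answer.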
        while (
            self._middle_revid == self._high_revid
            or self._middle_revid == self._low_revid
        ) and self.is_merge_point(self._middle_revid):
            for parent in self.get_parent_revids(self._middle_revid):
                if parent == self._low_revid:
                    continue
                else:
                    self._find_range_and_middle(parent)
                    break

        self._switch_wc_to_revno(self._middle_revid, outf)
        if (
            self._middle_revid == self._high_revid
            or self._middle_revid == self._low_revid
        ):
            self.set_current("done")


class cmd_bisect(Command):
    """Find an interesting commit using a binary search.

    Bisecting, in a nutshell, is a way to find the commit at which
    some testable change was made, such as the introduction of a bug
    or feature.  By identifying a version which did not have the
    interesting change and a later version which did, a developer
    can test for the presence of the change at various points in
    the history, eventually ending up at the precise commit when
    the change was first introduced.

    This command uses subcommands to implement the search, each
    of which changes the state of the bisection.  The
    subcommands are:

    brz bisect start
        Start a bisect, possibly clearing out a previous bisect.

    brz bisect yes [-r rev]
        The specified revision (or the current revision, if not given)
        has the characteristic we're looking for.

    brz bisect no [-r rev]
        The specified revision (or the current revision, if not given)
        does not have the characteristic we're looking for.

    brz bisect move -r rev
        Switch to a different revision manually.  Use if the bisect
        algorithm chooses a revision that is not suitable.  Try to
        move as little as possible.

    brz bisect reset
        Clear out a bisection in progress.

    brz bisect log [-o file]
        Output a log of the current bisection to standard output, or
        to the specified file.

    brz bisect replay <logfile>
        Replay a previously-saved bisect log, forgetting any bisection
        that might be in progress.

    brz bisect run